Spaces:
Sleeping
Sleeping
File size: 1,375 Bytes
188d71d a8f8d55 5566ae0 a8f8d55 188d71d a8f8d55 188d71d 70a3ce4 a8f8d55 70a3ce4 188d71d a8f8d55 188d71d a8f8d55 188d71d a8f8d55 188d71d fd81378 188d71d |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 |
import gradio as gr
from transformers import pipeline
import ast
# Text-generation pipeline for the TinyLlama chat model, forced onto CPU
# (device=-1). Loaded once at import time and shared by updateList below.
pipe = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0", device=-1)
# System prompt for the first request: asks the model to emit a Python dict
# (literally named 'list') mapping category names to lists of example items.
createListSysPrompt = (
"Please create a Python dictionary named 'list' that you will use to keep track of whatever is asked for. "
"The format should be exactly like this:\n\n"
"list = {\n"
" 'typeOfThing1': ['exOfThing1_1', 'exOfThing1_2'],\n"
" 'typeOfThing2': ['exOfThing2_1'],\n"
"}\n\n"
"Always return a valid Python dictionary with keys as categories and values as lists of items."
)
# System prompt intended for follow-up requests that should extend an
# existing list. NOTE(review): in the original updateList this branch is
# unreachable because the local list is always freshly created as empty.
updateListSysPrompt = (
"If you see mention of a thing of a type in the list that's not in the list, add it to the list."
)
def updateList(newText):
    """Generate a category-tracking dictionary from *newText* via the chat model.

    On the first call the model receives ``createListSysPrompt`` (create a
    fresh tracking dict); on subsequent calls it receives
    ``updateListSysPrompt`` (extend the existing list).

    Parameters
    ----------
    newText : str
        Free-form user text describing things to track.

    Returns
    -------
    str
        The raw generated text (prompt + completion) from the pipeline.
    """
    # Persist history on the function object. The original code did
    # `list = []` (shadowing the builtin) right before `if list == []`,
    # which was always true, so the update-prompt branch was dead code.
    history = getattr(updateList, "_history", [])
    preparePrompt = []
    if not history:
        preparePrompt.append({"role": "system", "content": createListSysPrompt})
    else:
        preparePrompt.append({"role": "system", "content": updateListSysPrompt})
    preparePrompt.append({"role": "user", "content": newText})
    # Record this request so later calls take the "update" path.
    history.append(newText)
    updateList._history = history
    prompt = pipe.tokenizer.apply_chat_template(preparePrompt, tokenize=False, add_generation_prompt=True)
    outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
    return outputs[0]["generated_text"]
# Minimal Gradio UI: one textbox in -> updateList -> one textbox out.
demo = gr.Interface(fn=updateList, inputs="textbox", outputs="textbox")
if __name__ == "__main__":
    # Start the local Gradio server only when run as a script.
    demo.launch()
|