daniloedu commited on
Commit
3d441a2
Β·
1 Parent(s): f9d442c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +52 -12
app.py CHANGED
@@ -1,15 +1,55 @@
1
  import gradio as gr
2
  from transformers import pipeline
3
 
4
- pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")
5
-
6
- def predict(image):
7
- predictions = pipeline(image)
8
- return {p["label"]: p["score"] for p in predictions}
9
-
10
- gr.Interface(
11
- predict,
12
- inputs=gr.inputs.Image(label="Upload hot dog candidate", type="filepath"),
13
- outputs=gr.outputs.Label(num_top_classes=2),
14
- title="Hot Dog? Or Not?",
15
- ).launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os

import gradio as gr
from transformers import pipeline
3
 
# NOTE(review): the original line was
#   client = pipeline("text-generation", model="...", api_url=API_URL)
# which fails twice: API_URL is never defined (NameError at import), and
# transformers.pipeline accepts no `api_url` keyword. The code below also calls
# client.generate_stream(...), which transformers pipelines do not provide —
# this looks like it was meant to be a hosted text-generation inference client
# (e.g. huggingface_hub.InferenceClient); confirm the intended backend.
client = pipeline("text-generation", model="upstage/Llama-2-70b-instruct")
def format_chat_prompt(message, chat_history, instruction):
    """Build the full text prompt sent to the model.

    Layout: a ``System:`` line with the instruction, one ``User:`` /
    ``Assistant:`` pair per past turn, then the new user message followed
    by a trailing ``Assistant:`` cue for the model to complete.

    Args:
        message: the new user message.
        chat_history: iterable of (user_message, bot_message) pairs.
        instruction: system-prompt text (concatenated directly after
            ``System:`` with no space, matching the original format).

    Returns:
        The assembled prompt string.
    """
    segments = [f"System:{instruction}"]
    for user_message, bot_message in chat_history:
        segments.append(f"User: {user_message}\nAssistant: {bot_message}")
    segments.append(f"User: {message}\nAssistant:")
    return "\n".join(segments)
# Words filtered out of the streamed response. BUG FIX: the original code
# referenced SAFETY_GUIDELINES without defining it anywhere in the file,
# raising NameError on the first streamed token. Default to an empty tuple
# (no filtering); populate as needed.
SAFETY_GUIDELINES = ()


def respond(message, chat_history, instruction, temperature=0.7):
    """Stream a model reply into the chat history, token by token.

    Args:
        message: the new user message from the textbox.
        chat_history: list of [user, assistant] pairs (gr.Chatbot format).
        instruction: system-prompt text.
        temperature: sampling temperature forwarded to the backend.

    Yields:
        ("", updated_chat_history) pairs — the empty string clears the
        textbox while the chatbot re-renders after every token.
    """
    prompt = format_chat_prompt(message, chat_history, instruction)
    # Open a new, initially-empty assistant turn for this message.
    chat_history = chat_history + [[message, ""]]
    # stop_sequences prevent the model from hallucinating the user's next turn.
    stream = client.generate_stream(prompt,
                                    max_new_tokens=1024,
                                    stop_sequences=["\nUser:", "<|endoftext|>"],
                                    temperature=temperature)
    acc_text = ""
    for idx, response in enumerate(stream):
        text_token = response.token.text

        # A populated `details` field marks the end of generation.
        if response.details:
            return

        # The backend tends to emit a leading space on the very first token.
        if idx == 0 and text_token.startswith(" "):
            text_token = text_token[1:]

        # Drop tokens containing any filtered word.
        if any(word in text_token for word in SAFETY_GUIDELINES):
            continue

        acc_text += text_token
        # Extend the in-progress assistant turn and re-yield the history so
        # the UI updates incrementally.
        last_turn = list(chat_history.pop(-1))
        last_turn[-1] += acc_text
        chat_history = chat_history + [last_turn]
        yield "", chat_history
        acc_text = ""
# Gradio UI: chat window, prompt box, advanced options, and submit wiring.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot(height=240)  # compact height to fit the notebook
    msg = gr.Textbox(label="Prompt")
    with gr.Accordion(label="Advanced options", open=False):
        system = gr.Textbox(
            label="System message",
            lines=2,
            value=("A conversation between a user and an LLM-based AI assistant. "
                   "The assistant gives helpful and honest answers."),
        )
        temperature = gr.Slider(label="temperature", minimum=0.1, maximum=1,
                                value=0.7, step=0.1)
    btn = gr.Button("Submit")
    clear = gr.ClearButton(components=[msg, chatbot], value="Clear console")

    # BUG FIX: the temperature slider was created but never passed to the
    # handlers, so it had no effect; wire it through to respond().
    btn.click(respond, inputs=[msg, chatbot, system, temperature],
              outputs=[msg, chatbot])
    # Pressing Enter in the textbox submits as well.
    msg.submit(respond, inputs=[msg, chatbot, system, temperature],
               outputs=[msg, chatbot])

gr.close_all()
# BUG FIX: `os` was never imported and a missing PORT4 env var raised
# KeyError; fall back to Gradio's default port 7860 when PORT4 is unset.
demo.queue().launch(share=True, server_port=int(os.environ.get("PORT4", 7860)))