federi committed on
Commit
61efb41
·
verified ·
1 Parent(s): 7eced1a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +97 -111
app.py CHANGED
@@ -8,134 +8,120 @@ import secrets
8
  cohere_api_key = os.getenv("COHERE_API_KEY")
9
  co = cohere.Client(cohere_api_key, client_name="huggingface-aya-23")
10
 
11
- SYSTEM_PROMPT = "Mi chiamo Diogene, rispondo solo in lingua italiana e sono un filosofo"
12
-
13
  def trigger_example(example):
14
- chat, updated_history = generate_response(example, "", None)
15
- return chat, updated_history
16
-
17
- def generate_response(user_message, cid, token, history=None):
18
 
19
- if not token:
20
- raise gr.Error("Error loading.")
21
-
22
- if history is None:
23
- history = []
24
- if not cid:
25
- cid = str(uuid.uuid4())
26
 
27
- print(f"cid: {cid} prompt:{user_message}")
28
-
29
- history.append(user_message)
30
-
31
- full_prompt = f"{SYSTEM_PROMPT}\n\n{user_message}"
32
-
33
- stream = co.chat_stream(message=full_prompt, conversation_id=cid, model='c4ai-aya-23', connectors=[], temperature=0.3)
34
 
35
- output = ""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
 
37
- for idx, response in enumerate(stream):
38
- if response.event_type == "text-generation":
39
- output += response.text
40
- if idx == 0:
41
- history.append(" " + output)
42
- else:
43
- history[-1] = output
44
-
45
- chat = [
46
- (history[i].strip(), history[i + 1].strip())
47
- for i in range(0, len(history) - 1, 2)
48
- ]
49
- yield chat, history, cid
50
-
51
- return chat, history, cid
52
-
53
-
54
  def clear_chat():
55
- return [], [], str(uuid.uuid4())
56
-
57
 
58
  examples = [
59
- "Explain the relativity theory in French",
60
- "Como sair de um helicóptero que caiu na água?",
61
- "¿Cómo le explicarías el aprendizaje automático a un extraterrestre?",
62
- "Explain gravity to a chicken.",
63
- "Descrivi il processo di creazione di un capolavoro, come se fossi un artista del Rinascimento a Firenze.",
64
- "Anneme onu ne kadar sevdiğimi anlatan bir mektup yaz",
65
- "Explique-moi le sens de la vie selon un grand auteur littéraire.",
66
- "Give me an example of an endangered species and let me know what I can do to help preserve it",
67
- "یک پاراگراف در مورد زیبایی‌های طبیعت در فصل پاییز بنویس",
68
- "Wie kann ich lernen, selbstbewusster zu werden?",
69
- "Formally introduce the transformer architecture with notation.",
 
70
  ]
71
 
72
  custom_css = """
73
  #logo-img {
74
- border: none !important;
75
  }
76
  #chat-message {
77
- font-size: 14px;
78
- min-height: 300px;
79
  }
80
  """
81
 
82
  with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
83
- cid = gr.State("")
84
- token = gr.State(value=None)
85
-
86
- with gr.Row():
87
- with gr.Column(scale=1):
88
- gr.Image("aya-logo.png", elem_id="logo-img", show_label=False, show_share_button=False, show_download_button=False)
89
- with gr.Column(scale=3):
90
- gr.Markdown("""C4AI Aya 23 is a research open weights release of an 8 and 35 billion parameter with highly advanced instruction fine-tuned model, covering 23 languages: Arabic, Chinese (simplified & traditional), Czech, Dutch, English, French, German, Greek, Hebrew, Hindi, Indonesian, Italian, Japanese, Korean, Persian, Polish, Portuguese, Romanian, Russian, Spanish, Turkish, Ukrainian, and Vietnamese.
91
- <br/>
92
- **Note**: Aya 23 is a single-turn instruction-following model and it is not optimized for chat mode use.
93
- <br/>
94
- **Model**: [aya-23-35B](https://huggingface.co/CohereForAI/aya-23-35B)
95
- <br/>
96
- **Developed by**: [Cohere for AI](https://cohere.com/research) and [Cohere](https://cohere.com/)
97
- <br/>
98
- **License**: [CC-BY-NC](https://cohere.com/c4ai-cc-by-nc-license), requires also adhering to [C4AI's Acceptable Use Policy](https://docs.cohere.com/docs/c4ai-acceptable-use-policy)
99
- """
100
- )
101
-
102
- with gr.Column():
103
- with gr.Row():
104
- chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True)
105
-
106
- with gr.Row():
107
- user_message = gr.Textbox(lines=1, placeholder="Ask anything ...", label="Input", show_label=False)
108
-
109
- with gr.Row():
110
- submit_button = gr.Button("Submit")
111
- clear_button = gr.Button("Clear chat")
112
-
113
- history = gr.State([])
114
-
115
- user_message.submit(fn=generate_response, inputs=[user_message, cid, token, history], outputs=[chatbot, history, cid], concurrency_limit=32)
116
- submit_button.click(fn=generate_response, inputs=[user_message, cid, token, history], outputs=[chatbot, history, cid], concurrency_limit=32)
117
-
118
- clear_button.click(fn=clear_chat, inputs=None, outputs=[chatbot, history, cid], concurrency_limit=32)
119
 
120
- user_message.submit(lambda x: gr.update(value=""), None, [user_message], queue=False)
121
- submit_button.click(lambda x: gr.update(value=""), None, [user_message], queue=False)
122
- clear_button.click(lambda x: gr.update(value=""), None, [user_message], queue=False)
 
 
 
 
 
 
 
 
 
 
 
 
123
 
124
- with gr.Row():
125
- gr.Examples(
126
- examples=examples,
127
- inputs=user_message,
128
- cache_examples=False,
129
- fn=trigger_example,
130
- outputs=[chatbot],
131
- examples_per_page=100
132
- )
133
-
134
- demo.load(lambda: secrets.token_hex(16), None, token)
135
-
136
- if __name__ == "__main__":
137
- # demo.launch(debug=True)
138
- try:
139
- demo.queue(api_open=False, max_size=40).launch(show_api=False)
140
- except Exception as e:
141
- print(f"Error: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
  cohere_api_key = os.getenv("COHERE_API_KEY")
9
  co = cohere.Client(cohere_api_key, client_name="huggingface-aya-23")
10
 
 
 
11
def trigger_example(example):
    """Run one example prompt through the model and return (chat, history).

    Fixes two defects in the original:
      * ``generate_response`` requires ``cid`` and ``token`` positionally,
        so calling it with only ``example`` raised TypeError — supply a
        fresh conversation id and a non-empty token.
      * ``generate_response`` is a generator yielding 3-tuples, so it cannot
        be unpacked into two names — drain it and keep the final state.
    """
    chat, history = [], []
    for chat, history, _cid in generate_response(
        example, str(uuid.uuid4()), secrets.token_hex(16)
    ):
        pass  # keep streaming until the generator is exhausted
    return chat, history
 
 
14
 
15
def generate_response(user_message, cid, token, history=None):
    """Stream a reply from the Aya 23 model for a single user message.

    Yields ``(chat, history, cid)`` after every generated chunk so the
    Gradio chatbot updates incrementally.

    Args:
        user_message: prompt text typed by the user.
        cid: conversation id; a fresh uuid4 is minted when falsy.
        token: opaque session token set by ``demo.load``; must be truthy.
        history: flat list alternating [user, bot, user, bot, ...]; a new
            list is created when ``None`` (mutable-default pitfall avoided).

    Raises:
        gr.Error: when no session token is present.
    """
    if not token:
        raise gr.Error("Error loading.")

    if history is None:
        history = []
    # BUG FIX: the original `if cid == "" or None:` parses as
    # `(cid == "") or (None)`, and a bare None is always falsy — so a None
    # cid slipped through. `not cid` covers both "" and None.
    if not cid:
        cid = str(uuid.uuid4())

    print(f"cid: {cid} prompt:{user_message}")
    history.append(user_message)

    stream = co.chat_stream(message=user_message, conversation_id=cid, model='c4ai-aya-23', connectors=[], temperature=0.3)

    output = ""
    chat = []  # defined up-front so the final return is safe even if no text arrives
    bot_turn_started = False
    for response in stream:
        if response.event_type == "text-generation":
            output += response.text
            # NOTE(review): the original keyed on `enumerate` idx == 0, but a
            # cohere stream typically begins with a "stream-start" event, so
            # the first text-generation chunk would hit the `else` branch and
            # overwrite the user message. Track the bot turn explicitly.
            if not bot_turn_started:
                history.append(" " + output)
                bot_turn_started = True
            else:
                history[-1] = output

            # Pair up (user, bot) turns for the gr.Chatbot component.
            chat = [
                (history[i].strip(), history[i + 1].strip())
                for i in range(0, len(history) - 1, 2)
            ]
            yield chat, history, cid

    return chat, history, cid
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
def clear_chat():
    """Reset the session: empty chat pane, empty history, fresh conversation id."""
    fresh_cid = uuid.uuid4()
    return [], [], str(fresh_cid)
 
45
 
46
# Multilingual demo prompts, one per page, showcasing the model's
# 23-language coverage (strings unchanged from the original).
examples = [
    "Explain the relativity theory in French",
    "Como sair de um helicóptero que caiu na água?",
    "¿Cómo le explicarías el aprendizaje automático a un extraterrestre?",
    "Explain gravity to a chicken.",
    "Descrivi il processo di creazione di un capolavoro, come se fossi un artista del Rinascimento a Firenze.",
    "Anneme onu ne kadar sevdiğimi anlatan bir mektup yaz",
    "Explique-moi le sens de la vie selon un grand auteur littéraire.",
    "Give me an example of an endangered species and let me know what I can do to help preserve it",
    "یک پاراگراف در مورد زیبایی‌های طبیعت در فصل پاییز بنویس",
    "Wie kann ich lernen, selbstbewusster zu werden?",
    "Formally introduce the transformer architecture with notation.",
]
60
 
61
# CSS overrides injected into the Blocks UI: borderless logo image and a
# smaller-font, minimum-height chat message area.
custom_css = """
#logo-img {
  border: none !important;
}
#chat-message {
  font-size: 14px;
  min-height: 300px;
}
"""
70
 
71
with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
    # Per-session state: conversation id, session token, and raw turn history.
    cid = gr.State("")
    token = gr.State(value=None)

    with gr.Row():
        with gr.Column(scale=1):
            gr.Image("aya-logo.png", elem_id="logo-img", show_label=False, show_share_button=False, show_download_button=False)
        with gr.Column(scale=3):
            gr.Markdown("""C4AI Aya 23 is a research open weights release of an 8 and 35 billion parameter with highly advanced instruction fine-tuned model, covering 23 languages: Arabic, Chinese (simplified & traditional), Czech, Dutch, English, French, German, Greek, Hebrew, Hindi, Indonesian, Italian, Japanese, Korean, Persian, Polish, Portuguese, Romanian, Russian, Spanish, Turkish, Ukrainian, and Vietnamese.
            <br/>
            **Note**: Aya 23 is a single-turn instruction-following model and it is not optimized for chat mode use.
            <br/>
            **Model**: [aya-23-35B](https://huggingface.co/CohereForAI/aya-23-35B)
            <br/>
            **Developed by**: [Cohere for AI](https://cohere.com/research) and [Cohere](https://cohere.com/)
            <br/>
            **License**: [CC-BY-NC](https://cohere.com/c4ai-cc-by-nc-license), requires also adhering to [C4AI's Acceptable Use Policy](https://docs.cohere.com/docs/c4ai-acceptable-use-policy)
            """
            )

    with gr.Column():
        with gr.Row():
            chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True)

        with gr.Row():
            user_message = gr.Textbox(lines=1, placeholder="Ask anything ...", label="Input", show_label=False)

        with gr.Row():
            submit_button = gr.Button("Submit")
            clear_button = gr.Button("Clear chat")

        history = gr.State([])

        # Both Enter and the Submit button stream a model reply.
        user_message.submit(fn=generate_response, inputs=[user_message, cid, token, history], outputs=[chatbot, history, cid], concurrency_limit=32)
        submit_button.click(fn=generate_response, inputs=[user_message, cid, token, history], outputs=[chatbot, history, cid], concurrency_limit=32)

        clear_button.click(fn=clear_chat, inputs=None, outputs=[chatbot, history, cid], concurrency_limit=32)

        # BUG FIX: with inputs=None Gradio invokes the callback with zero
        # arguments, so the original `lambda x: ...` raised TypeError; the
        # lambdas must take no parameters to clear the textbox.
        user_message.submit(lambda: gr.update(value=""), None, [user_message], queue=False)
        submit_button.click(lambda: gr.update(value=""), None, [user_message], queue=False)
        clear_button.click(lambda: gr.update(value=""), None, [user_message], queue=False)

        with gr.Row():
            gr.Examples(
                examples=examples,
                inputs=user_message,
                cache_examples=False,
                fn=trigger_example,
                outputs=[chatbot],
                examples_per_page=100
            )

    # Mint a per-session token on page load; generate_response requires it.
    demo.load(lambda: secrets.token_hex(16), None, token)
122
if __name__ == "__main__":
    # BUG FIX: the guard read `if name == "main":` (NameError — the dunder
    # underscores were stripped), and the print call was missing its closing
    # parenthesis (SyntaxError).
    # demo.launch(debug=True)
    try:
        demo.queue(api_open=False, max_size=40).launch(show_api=False)
    except Exception as e:
        # Surface launch failures instead of crashing the Space silently.
        print(f"Error: {e}")