federi committed on
Commit
d9095de
·
verified ·
1 Parent(s): 21c30cc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +87 -72
app.py CHANGED
@@ -5,13 +5,16 @@ import re
5
  import uuid
6
  import secrets
7
 
 
 
8
  cohere_api_key = os.getenv("COHERE_API_KEY")
9
  co = cohere.Client(cohere_api_key, client_name="huggingface-aya-23")
10
 
 
11
  def trigger_example(example):
12
  chat, updated_history = generate_response(example)
13
  return chat, updated_history
14
-
15
  def generate_response(user_message, cid, token, history=None):
16
 
17
  if not token:
@@ -21,28 +24,35 @@ def generate_response(user_message, cid, token, history=None):
21
  history = []
22
  if cid == "" or None:
23
  cid = str(uuid.uuid4())
24
- print(f"cid: {cid} prompt:{user_message}")
25
- history.append(user_message)
26
- stream = co.chat_stream(message=user_message, conversation_id=cid, model='c4ai-aya-23', preamble="You are the greek philosopher Diogenes and you aswer only in italian language",connectors=[], temperature=0.3)
27
- #stream = co.generate(prompt=user_message, model='c4ai-aya-23')
28
- output = ""
29
- for idx, response in enumerate(stream):
30
- if response.event_type == "text-generation":
31
- output += response.text
32
- if idx == 0:
33
- history.append(" " + output)
34
- else:
35
- history[-1] = output
36
- chat = [
37
- (history[i].strip(), history[i + 1].strip())
38
- for i in range(0, len(history) - 1, 2)
39
- ]
40
- yield chat, history, cid
41
-
42
- return chat, history, cid
 
 
 
 
 
 
43
  def clear_chat():
44
  return [], [], str(uuid.uuid4())
45
 
 
46
  examples = [
47
  "Explain the relativity theory in French",
48
  "Como sair de um helicóptero que caiu na água?",
@@ -55,72 +65,77 @@ examples = [
55
  "یک پاراگراف در مورد زیبایی‌های طبیعت در فصل پاییز بنویس",
56
  "Wie kann ich lernen, selbstbewusster zu werden?",
57
  "Formally introduce the transformer architecture with notation.",
58
-
59
  ]
60
 
61
  custom_css = """
62
  #logo-img {
63
- border: none !important;
64
  }
65
  #chat-message {
66
- font-size: 14px;
67
- min-height: 300px;
68
  }
69
  """
70
 
71
  with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
72
  cid = gr.State("")
73
  token = gr.State(value=None)
74
-
75
- with gr.Row():
76
- with gr.Column(scale=1):
77
- gr.Image("aya-logo.png", elem_id="logo-img", show_label=False, show_share_button=False, show_download_button=False)
78
- with gr.Column(scale=3):
79
- gr.Markdown("""C4AI Aya 23 is a research open weights release of an 8 and 35 billion parameter with highly advanced instruction fine-tuned model, covering 23 languages: Arabic, Chinese (simplified & traditional), Czech, Dutch, English, French, German, Greek, Hebrew, Hindi, Indonesian, Italian, Japanese, Korean, Persian, Polish, Portuguese, Romanian, Russian, Spanish, Turkish, Ukrainian, and Vietnamese.
80
- <br/>
81
- **Note**: Aya 23 is a single-turn instruction-following model and it is not optimized for chat mode use.
82
- <br/>
83
- **Model**: [aya-23-35B](https://huggingface.co/CohereForAI/aya-23-35B)
84
- <br/>
85
- **Developed by**: [Cohere for AI](https://cohere.com/research) and [Cohere](https://cohere.com/)
86
- <br/>
87
- **License**: [CC-BY-NC](https://cohere.com/c4ai-cc-by-nc-license), requires also adhering to [C4AI's Acceptable Use Policy](https://docs.cohere.com/docs/c4ai-acceptable-use-policy)
88
- """
89
- )
90
-
91
- with gr.Column():
92
- with gr.Row():
93
- chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True)
94
 
95
  with gr.Row():
96
- user_message = gr.Textbox(lines=1, placeholder="Ask anything ...", label="Input", show_label=False)
97
-
98
- with gr.Row():
99
- submit_button = gr.Button("Submit")
100
- clear_button = gr.Button("Clear chat")
101
-
102
- history = gr.State([])
103
-
104
- user_message.submit(fn=generate_response, inputs=[user_message, cid, token, history], outputs=[chatbot, history, cid], concurrency_limit=32)
105
- submit_button.click(fn=generate_response, inputs=[user_message, cid, token, history], outputs=[chatbot, history, cid], concurrency_limit=32)
106
-
107
- clear_button.click(fn=clear_chat, inputs=None, outputs=[chatbot, history, cid], concurrency_limit=32)
108
- user_message.submit(lambda x: gr.update(value=""), None, [user_message], queue=False)
109
- submit_button.click(lambda x: gr.update(value=""), None, [user_message], queue=False)
110
- clear_button.click(lambda x: gr.update(value=""), None, [user_message], queue=False)
111
-
112
- with gr.Row():
113
- gr.Examples(
114
- examples=examples,
115
- inputs=user_message,
116
- cache_examples=False,
117
- fn=trigger_example,
118
- outputs=[chatbot],
119
- examples_per_page=100
120
- )
121
- demo.load(lambda: secrets.token_hex(16), None, token)
122
- if name == "main":
123
- # demo.launch(debug=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
124
  try:
125
  demo.queue(api_open=False, max_size=40).launch(show_api=False)
126
  except Exception as e:
 
5
  import uuid
6
  import secrets
7
 
8
+
9
+
10
# Read the Cohere API key from the environment and build the shared client
# used for all chat calls below. `client_name` identifies this integration
# (the Hugging Face Aya-23 Space) to Cohere.
# NOTE(review): os.getenv returns None when COHERE_API_KEY is unset — the
# client is still constructed; presumably requests fail later. Verify.
cohere_api_key = os.getenv("COHERE_API_KEY")
co = cohere.Client(cohere_api_key, client_name="huggingface-aya-23")
12
 
13
+
14
def trigger_example(example):
    """Run one example prompt through the model (used as ``gr.Examples``' fn).

    ``generate_response`` is a generator that streams ``(chat, history, cid)``
    tuples; drain it and return the final chat transcript and history.

    Bug fixed: the original called ``generate_response(example)`` with a
    single argument, but the function requires ``cid`` and ``token`` as well
    (TypeError), and it yields 3-tuples, so the old 2-tuple unpack of the
    generator object could never have worked.
    """
    chat, updated_history = [], []
    # Fresh conversation id and a throwaway session token for a one-off
    # example run, mirroring what demo.load/clear_chat provide in the UI.
    for chat, updated_history, _cid in generate_response(
        example, str(uuid.uuid4()), secrets.token_hex(16)
    ):
        pass
    return chat, updated_history
17
+
18
  def generate_response(user_message, cid, token, history=None):
19
 
20
  if not token:
 
24
  history = []
25
  if cid == "" or None:
26
  cid = str(uuid.uuid4())
27
+
28
+ print(f"cid: {cid} prompt:{user_message}")
29
+
30
+ history.append(user_message)
31
+
32
+ stream = co.chat_stream(message=user_message, conversation_id=cid, model='c4ai-aya-23', connectors=[], temperature=0.3)
33
+ #stream = co.generate(prompt=user_message, model='c4ai-aya-23')
34
+ output = ""
35
+
36
+ for idx, response in enumerate(stream):
37
+ if response.event_type == "text-generation":
38
+ output += response.text
39
+ if idx == 0:
40
+ history.append(" " + output)
41
+ else:
42
+ history[-1] = output
43
+ chat = [
44
+ (history[i].strip(), history[i + 1].strip())
45
+ for i in range(0, len(history) - 1, 2)
46
+ ]
47
+ yield chat, history, cid
48
+
49
+ return chat, history, cid
50
+
51
+
52
def clear_chat():
    """Reset the UI state: empty chatbot, empty history, and a brand-new
    conversation id for the next chat."""
    fresh_conversation_id = uuid.uuid4()
    return [], [], str(fresh_conversation_id)
54
 
55
+
56
  examples = [
57
  "Explain the relativity theory in French",
58
  "Como sair de um helicóptero que caiu na água?",
 
65
  "یک پاراگراف در مورد زیبایی‌های طبیعت در فصل پاییز بنویس",
66
  "Wie kann ich lernen, selbstbewusster zu werden?",
67
  "Formally introduce the transformer architecture with notation.",
68
+
69
  ]
70
 
71
# CSS overrides injected into gr.Blocks: strip the border from the logo
# image and size the chat message area.
# NOTE(review): in-string indentation was lost in this paste and is
# reconstructed here; CSS is whitespace-insensitive so rendering is unchanged.
custom_css = """
#logo-img {
  border: none !important;
}
#chat-message {
  font-size: 14px;
  min-height: 300px;
}
"""
80
 
81
  with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
82
  cid = gr.State("")
83
  token = gr.State(value=None)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
84
 
85
  with gr.Row():
86
+ with gr.Column(scale=1):
87
+ gr.Image("aya-logo.png", elem_id="logo-img", show_label=False, show_share_button=False, show_download_button=False)
88
+ with gr.Column(scale=3):
89
+ gr.Markdown("""C4AI Aya 23 is a research open weights release of an 8 and 35 billion parameter with highly advanced instruction fine-tuned model, covering 23 languages: Arabic, Chinese (simplified & traditional), Czech, Dutch, English, French, German, Greek, Hebrew, Hindi, Indonesian, Italian, Japanese, Korean, Persian, Polish, Portuguese, Romanian, Russian, Spanish, Turkish, Ukrainian, and Vietnamese.
90
+ <br/>
91
+ **Note**: Aya 23 is a single-turn instruction-following model and it is not optimized for chat mode use.
92
+ <br/>
93
+ **Model**: [aya-23-35B](https://huggingface.co/CohereForAI/aya-23-35B)
94
+ <br/>
95
+ **Developed by**: [Cohere for AI](https://cohere.com/research) and [Cohere](https://cohere.com/)
96
+ <br/>
97
+ **License**: [CC-BY-NC](https://cohere.com/c4ai-cc-by-nc-license), requires also adhering to [C4AI's Acceptable Use Policy](https://docs.cohere.com/docs/c4ai-acceptable-use-policy)
98
+ """
99
+ )
100
+
101
+ with gr.Column():
102
+ with gr.Row():
103
+ chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True)
104
+
105
+ with gr.Row():
106
+ user_message = gr.Textbox(lines=1, placeholder="Ask anything ...", label="Input", show_label=False)
107
+
108
+
109
+ with gr.Row():
110
+ submit_button = gr.Button("Submit")
111
+ clear_button = gr.Button("Clear chat")
112
+
113
+
114
+ history = gr.State([])
115
+
116
+ user_message.submit(fn=generate_response, inputs=[user_message, cid, token, history], outputs=[chatbot, history, cid], concurrency_limit=32)
117
+ submit_button.click(fn=generate_response, inputs=[user_message, cid, token, history], outputs=[chatbot, history, cid], concurrency_limit=32)
118
+
119
+ clear_button.click(fn=clear_chat, inputs=None, outputs=[chatbot, history, cid], concurrency_limit=32)
120
+
121
+ user_message.submit(lambda x: gr.update(value=""), None, [user_message], queue=False)
122
+ submit_button.click(lambda x: gr.update(value=""), None, [user_message], queue=False)
123
+ clear_button.click(lambda x: gr.update(value=""), None, [user_message], queue=False)
124
+
125
+ with gr.Row():
126
+ gr.Examples(
127
+ examples=examples,
128
+ inputs=user_message,
129
+ cache_examples=False,
130
+ fn=trigger_example,
131
+ outputs=[chatbot],
132
+ examples_per_page=100
133
+ )
134
+
135
+ demo.load(lambda: secrets.token_hex(16), None, token)
136
+
137
+ if __name__ == "__main__":
138
+ # demo.launch(debug=True)
139
  try:
140
  demo.queue(api_open=False, max_size=40).launch(show_api=False)
141
  except Exception as e: