jiminaa commited on
Commit
0a5c2ee
·
1 Parent(s): 010d016

learning how to use gradio?

Browse files
Files changed (1) hide show
  1. app.py +15 -14
app.py CHANGED
@@ -133,20 +133,20 @@ def read_api():
133
  "languages": list(adapter_paths.keys()),
134
  "device": "CPU 16GB in Huggingface Space",
135
  "endpoints": {
136
- "POST /generate": "Generate with streaming",
137
- "GET /languages": "List available languages"
138
  }
139
  }
140
 
141
  # return information about the language of the model
142
- @app.get("/languages")
143
  def get_languages():
144
  return {
145
  "languages": list(adapter_paths.keys()),
146
  }
147
 
148
  # providing a response through a stream
149
- @app.post("/generate")
150
  async def generate_stream_api(request: GenerateRequest):
151
 
152
  # because pydantic uses Message class
@@ -191,19 +191,24 @@ def chat_gradio(message, history, language, system_prompt, max_length, temperatu
191
  # only uses the last 10 messages to keep within context limit
192
  messages.extend(history[-10:])
193
 
194
- messages.append({"role": "user", "content": message})
 
195
 
196
- partial_response = ""
197
  for token in generate_text_stream(
198
  messages,
199
  language,
200
  max_length,
201
  temperature
202
  ):
203
- partial_response += token
204
- yield partial_response
205
 
206
- with gr.Blocks(title="Language Learning Chatbot", theme=gr.themes.Soft()) as demo:
 
 
 
 
207
 
208
  with gr.Row():
209
  with gr.Column(scale=2):
@@ -289,11 +294,7 @@ with gr.Blocks(title="Language Learning Chatbot", theme=gr.themes.Soft()) as dem
289
  queue=False # Don't queue this action
290
  )
291
 
292
- app = gr.mount_gradio_app(app, demo, path="/ui")
293
-
294
- @app.get("/")
295
- def redirect_to_ui():
296
- return RedirectResponse(url="/ui/", status_code=302)
297
 
298
  if __name__ == "__main__":
299
  uvicorn.run(app, host="0.0.0.0", port=7860)
 
133
  "languages": list(adapter_paths.keys()),
134
  "device": "CPU 16GB in Huggingface Space",
135
  "endpoints": {
136
+ "POST /api/generate": "Generate with streaming",
137
+ "GET /api/languages": "List available languages"
138
  }
139
  }
140
 
141
  # return information about the language of the model
142
+ @app.get("/api/languages")
143
  def get_languages():
144
  return {
145
  "languages": list(adapter_paths.keys()),
146
  }
147
 
148
  # providing a response through a stream
149
+ @app.post("/api/generate")
150
  async def generate_stream_api(request: GenerateRequest):
151
 
152
  # because pydantic uses Message class
 
191
  # only uses the last 10 messages to keep within context limit
192
  messages.extend(history[-10:])
193
 
194
+ user_msg = {"role": "user", "content": message}
195
+ messages.append(user_msg)
196
 
197
+ assistant_msg = {"role": "assistant", "content": ""}
198
  for token in generate_text_stream(
199
  messages,
200
  language,
201
  max_length,
202
  temperature
203
  ):
204
+ assistant_msg["content"] += token
205
+ yield history + [user_msg, assistant_msg]
206
 
207
+ with gr.Blocks(
208
+ title="Language Learning Chatbot",
209
+ theme=gr.themes.Soft(),
210
+ queue=False
211
+ ) as demo:
212
 
213
  with gr.Row():
214
  with gr.Column(scale=2):
 
294
  queue=False # Don't queue this action
295
  )
296
 
297
+ app = gr.mount_gradio_app(app, demo, path="/")
 
 
 
 
298
 
299
  if __name__ == "__main__":
300
  uvicorn.run(app, host="0.0.0.0", port=7860)