Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,6 +1,38 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
from huggingface_hub import InferenceClient
|
| 3 |
from google import genai
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 4 |
import os

# SECURITY FIX: the Gemini API key was hard-coded (and committed) here — a
# leaked credential. Read it from the environment instead; the previously
# committed key must be treated as compromised and revoked/rotated.
client = genai.Client(api_key=os.environ["GEMINI_API_KEY"])

# Model identifier used for all generate/chat calls in this Space.
MODEL_ID = "gemini-2.0-flash-exp"
|
| 6 |
|
|
@@ -49,7 +81,8 @@ def respond(
|
|
| 49 |
response += token
|
| 50 |
yield response
|
| 51 |
'''
|
| 52 |
-
|
|
|
|
| 53 |
print (response)
|
| 54 |
yield response
|
| 55 |
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
from huggingface_hub import InferenceClient
|
| 3 |
from google import genai
|
| 4 |
+
import json
|
| 5 |
+
|
| 6 |
+
from IPython.display import display, HTML, Markdown
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def show_json(obj):
    """Pretty-print a pydantic-style object as JSON and return the string.

    Args:
        obj: Any object exposing ``model_dump(exclude_none=...)`` (e.g. a
            pydantic model, as returned by the google-genai SDK).

    Returns:
        str: The 2-space-indented JSON representation with ``None`` fields
        omitted.
    """
    # Serialize exactly once so the printed output and the return value can
    # never diverge (the original called json.dumps twice on the same object).
    dumped = json.dumps(obj.model_dump(exclude_none=True), indent=2)
    print(dumped)
    return dumped
|
| 12 |
+
|
| 13 |
+
def show_parts(r):
    """Render each part of a genai response in the notebook and return the
    last text or executable-code payload seen.

    Args:
        r: A google-genai response object; only ``r.candidates[0]`` is read.
            NOTE(review): assumes at least one candidate is present — an empty
            ``candidates`` list would raise IndexError; confirm with callers.

    Returns:
        The last ``part.text`` string or ``part.executable_code`` object
        rendered, or ``None`` when no such part exists (including the
        blocked/empty-response case where ``parts`` is ``None``).
    """
    # BUG FIX: the original only assigned `output` inside the text /
    # executable_code branches, so a response whose parts all fell into the
    # `else` branch raised UnboundLocalError at `return output`.
    output = None

    parts = r.candidates[0].content.parts
    if parts is None:
        # Blocked or truncated responses carry no parts; surface the reason.
        finish_reason = r.candidates[0].finish_reason
        print(f'{finish_reason=}')
        return

    for part in parts:
        if part.text:
            display(Markdown(part.text))
            output = part.text
        elif part.executable_code:
            # Render generated code as a fenced python block.
            display(Markdown(f'```python\n{part.executable_code.code}\n```'))
            output = part.executable_code
        else:
            # Unknown part type: dump its JSON for inspection (not returned).
            show_json(part)

    # When Google-Search grounding was used, render the mandated search
    # entry-point widget alongside the answer.
    grounding_metadata = r.candidates[0].grounding_metadata
    if grounding_metadata and grounding_metadata.search_entry_point:
        display(HTML(grounding_metadata.search_entry_point.rendered_content))
    return output
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
|
| 36 |
import os

# SECURITY FIX: the Gemini API key was hard-coded (and committed) here — a
# leaked credential. Read it from the environment instead; the previously
# committed key must be treated as compromised and revoked/rotated.
client = genai.Client(api_key=os.environ["GEMINI_API_KEY"])

# Model identifier used for all generate/chat calls in this Space.
MODEL_ID = "gemini-2.0-flash-exp"
|
| 38 |
|
|
|
|
| 81 |
response += token
|
| 82 |
yield response
|
| 83 |
'''
|
| 84 |
+
r = soccer_chat.send_message(f'''Analyze the conversation history so far: {messages} and come up with a response that continues the conversation forward.''')
|
| 85 |
+
response = show_parts(r)
|
| 86 |
print (response)
|
| 87 |
yield response
|
| 88 |
|