Spaces:
Sleeping
Sleeping
Commit
·
a3e63cc
1
Parent(s):
7c7b645
Improve debugging in Hugging Face Spaces
Browse files- Gradio_UI.py +1 -0
- app.py +2 -0
Gradio_UI.py
CHANGED
|
@@ -291,6 +291,7 @@ class GradioUI:
|
|
| 291 |
).then(self.interact_with_agent, [stored_messages, chatbot], [chatbot])
|
| 292 |
|
| 293 |
share = not os.getenv("GRADIO_SHARE") == "false"
|
|
|
|
| 294 |
demo.launch(debug=True, share=share, **kwargs)
|
| 295 |
|
| 296 |
__all__ = ["stream_to_gradio", "GradioUI"]
|
|
|
|
| 291 |
).then(self.interact_with_agent, [stored_messages, chatbot], [chatbot])
|
| 292 |
|
| 293 |
share = not os.getenv("GRADIO_SHARE") == "false"
|
| 294 |
+
print("GRADIO_SHARE is set to", os.getenv("GRADIO_SHARE"))
|
| 295 |
demo.launch(debug=True, share=share, **kwargs)
|
| 296 |
|
| 297 |
__all__ = ["stream_to_gradio", "GradioUI"]
|
app.py
CHANGED
|
@@ -32,12 +32,14 @@ def get_current_time_in_timezone(timezone: str) -> str:
|
|
| 32 |
|
| 33 |
def choose_model():
|
| 34 |
if os.getenv("OLLAMA_MODEL"):
|
|
|
|
| 35 |
return LiteLLMModel(
|
| 36 |
model_id=os.getenv("OLLAMA_MODEL"),
|
| 37 |
api_base=os.getenv("OLLAMA_ENDPOINT"),
|
| 38 |
api_key=os.getenv("OLLAMA_KEY"),
|
| 39 |
)
|
| 40 |
else:
|
|
|
|
| 41 |
return HfApiModel(
|
| 42 |
max_tokens=2096,
|
| 43 |
temperature=0.5,
|
|
|
|
| 32 |
|
| 33 |
def choose_model():
|
| 34 |
if os.getenv("OLLAMA_MODEL"):
|
| 35 |
+
print("Using an Ollama model")
|
| 36 |
return LiteLLMModel(
|
| 37 |
model_id=os.getenv("OLLAMA_MODEL"),
|
| 38 |
api_base=os.getenv("OLLAMA_ENDPOINT"),
|
| 39 |
api_key=os.getenv("OLLAMA_KEY"),
|
| 40 |
)
|
| 41 |
else:
|
| 42 |
+
print("Using a HuggingFace model")
|
| 43 |
return HfApiModel(
|
| 44 |
max_tokens=2096,
|
| 45 |
temperature=0.5,
|