Using litellm instead of huggingfaceapi
app.py CHANGED

@@ -2,7 +2,7 @@ import os
 import gradio as gr
 from llama_index.core.tools import FunctionTool
 from llama_index.core.agent.workflow import AgentWorkflow, ReActAgent
-from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
+from llama_index.llms.litellm import LiteLLM
 from code_agent import initialize_code_agent
 from scientific_paper_agent import load_scientific_paper_dataset, ScientificPaperRetriever
 from search_agent import init_search_tool

@@ -13,9 +13,9 @@ global currentMode
 
 hf_token = os.environ.get('HF_TOKEN')
 
-llm = HuggingFaceInferenceAPI(
-    model_name="Qwen/Qwen2.5-7B-Instruct",
-    token=hf_token
+llm = LiteLLM(
+    model_name="huggingface/Qwen/Qwen2.5-7B-Instruct",
+    api_key=hf_token
 )
 image_to_text_tool = FunctionTool.from_defaults(
     fn=init_image_to_text,

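For context, this is what the swapped-in wrapper looks like standalone. A minimal sketch, mirroring the constructor arguments exactly as the commit spells them; it assumes the llama-index-llms-litellm package (which provides llama_index.llms.litellm) is in the Space's requirements and that HF_TOKEN is set. The "huggingface/" prefix on the model name is what tells LiteLLM to route the call to the Hugging Face inference backend.

import os

from llama_index.llms.litellm import LiteLLM

# Constructor arguments copied from the commit; the "huggingface/"
# prefix routes the request to the Hugging Face inference backend.
llm = LiteLLM(
    model_name="huggingface/Qwen/Qwen2.5-7B-Instruct",
    api_key=os.environ.get("HF_TOKEN"),
)

# Hypothetical smoke test, outside the agent workflow.
print(llm.complete("Reply with one word: ready").text)
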
@@ -100,7 +100,7 @@ multi_agent_workflow = AgentWorkflow(
 
 
 
-def respond(
+async def respond(
     message,
     history: list[dict[str, str]],
     system_message,

@@ -110,7 +110,12 @@ def respond(
     mode
 ):
     global currentMode
-
+    if mode is not None:
+        mode = mode
+    else:
+        mode = "Conversation Mode"
+
+    print(f"Current Mode: {mode}")
 
     if mode == "Math Mode":
         currentMode = "math"

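A small note on the new guard: the `if mode is not None: mode = mode` branch is a no-op, so the five added lines reduce to one default assignment. A behavior-preserving sketch:

def resolve_mode(mode: str | None) -> str:
    # Keep the incoming dropdown value; fall back only when Gradio
    # passes None (e.g. before the dropdown has been wired up).
    return mode if mode is not None else "Conversation Mode"

print(resolve_mode(None))         # Conversation Mode
print(resolve_mode("Math Mode"))  # Math Mode
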
@@ -121,7 +126,10 @@ def respond(
     else:
         currentMode = "conversation"
 
-
+    result = await multi_agent_workflow.run(message, max_tokens=max_tokens, temperature=temperature, top_p=top_p)
+
+    return result
+
 
 
 with gr.Blocks() as demo:

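Making respond a coroutine is what allows the await here; gr.ChatInterface accepts async handlers directly. A minimal sketch of the pattern, leaving out the sampling kwargs since the commit does not show whether AgentWorkflow.run forwards max_tokens/temperature/top_p to the underlying LLM:

from llama_index.core.agent.workflow import AgentWorkflow

async def run_agent(workflow: AgentWorkflow, message: str) -> str:
    # run() returns an awaitable handler; awaiting it yields the
    # final agent output once the workflow finishes.
    result = await workflow.run(message)
    # Coerce the result object to plain text before returning it
    # to the chat UI, which expects a string.
    return str(result)
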
@@ -131,11 +139,11 @@ with gr.Blocks() as demo:
         value="Conversation Mode",
         label="Interaction Mode"
     )
-
     # ChatInterface without additional_inputs
     chatbot = gr.ChatInterface(
         fn=respond,
-        type="messages"
+        type="messages",
+        additional_inputs=[mode_dropdown]
     )
 
     # Link dropdown value to respond function

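Two things worth flagging on this last hunk: the "# ChatInterface without additional_inputs" comment above gr.ChatInterface is now stale, and as committed, respond still declares system_message, max_tokens, temperature, and top_p, which nothing in additional_inputs appears to supply. For reference, Gradio passes the values of additional_inputs components to fn in order, after message and history; a minimal self-contained sketch with a hypothetical echo handler:

import gradio as gr

async def respond(message, history, mode):
    # `mode` receives the dropdown's current value because the
    # dropdown is listed in additional_inputs; extra inputs are
    # appended after (message, history) in the call to fn.
    return f"[{mode}] echo: {message}"

with gr.Blocks() as demo:
    mode_dropdown = gr.Dropdown(
        choices=["Conversation Mode", "Math Mode"],
        value="Conversation Mode",
        label="Interaction Mode",
    )
    gr.ChatInterface(
        fn=respond,
        type="messages",
        additional_inputs=[mode_dropdown],
    )

demo.launch()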