CUMANI Paolo
committed on
Commit
·
fea783c
1
Parent(s):
17e605d
[ADD] Choice of model type from UI
Browse files
agent.py
CHANGED
|
@@ -41,9 +41,7 @@ class FinalAgent:
|
|
| 41 |
chat = ChatHuggingFace(llm=llm, verbose=True)
|
| 42 |
elif model_type == "OLLAMA":
|
| 43 |
from langchain_ollama import ChatOllama
|
| 44 |
-
#chat = ChatOllama(model = "qwen2.5:14b-instruct")
|
| 45 |
chat = ChatOllama(model = "qwen3:8b")
|
| 46 |
-
#chat = ChatOllama(model = "gpt-oss:20b")
|
| 47 |
elif model_type == "GOOGLE":
|
| 48 |
from langchain_google_genai import ChatGoogleGenerativeAI
|
| 49 |
from langchain_core.rate_limiters import InMemoryRateLimiter
|
|
@@ -55,7 +53,8 @@ class FinalAgent:
|
|
| 55 |
max_bucket_size=10, # Controls the maximum burst size.
|
| 56 |
)
|
| 57 |
chat = ChatGoogleGenerativeAI(model="gemini-2.5-flash", rate_limiter=rate_limiter)
|
| 58 |
-
|
|
|
|
| 59 |
|
| 60 |
tools = [webpage_reader_tool,
|
| 61 |
transcribe_youtube_video_tool,
|
|
|
|
| 41 |
chat = ChatHuggingFace(llm=llm, verbose=True)
|
| 42 |
elif model_type == "OLLAMA":
|
| 43 |
from langchain_ollama import ChatOllama
|
|
|
|
| 44 |
chat = ChatOllama(model = "qwen3:8b")
|
|
|
|
| 45 |
elif model_type == "GOOGLE":
|
| 46 |
from langchain_google_genai import ChatGoogleGenerativeAI
|
| 47 |
from langchain_core.rate_limiters import InMemoryRateLimiter
|
|
|
|
| 53 |
max_bucket_size=10, # Controls the maximum burst size.
|
| 54 |
)
|
| 55 |
chat = ChatGoogleGenerativeAI(model="gemini-2.5-flash", rate_limiter=rate_limiter)
|
| 56 |
+
else:
|
| 57 |
+
raise ValueError(f'Model provider can be only one between GOOGLE, OLLAMA or HUGGINGFACE, received {model_type}')
|
| 58 |
|
| 59 |
tools = [webpage_reader_tool,
|
| 60 |
transcribe_youtube_video_tool,
|
app.py
CHANGED
|
@@ -10,7 +10,7 @@ from agent import FinalAgent
|
|
| 10 |
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
| 11 |
|
| 12 |
|
| 13 |
-
def run_and_submit_all( profile: gr.OAuthProfile | None):
|
| 14 |
"""
|
| 15 |
Fetches all questions, runs the BasicAgent on them, submits all answers,
|
| 16 |
and displays the results.
|
|
@@ -31,7 +31,7 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
| 31 |
|
| 32 |
# 1. Instantiate Agent ( modify this part to create your agent)
|
| 33 |
try:
|
| 34 |
-
agent = FinalAgent(model_type=
|
| 35 |
except Exception as e:
|
| 36 |
print(f"Error instantiating agent: {e}")
|
| 37 |
return f"Error initializing agent: {e}", None
|
|
@@ -166,7 +166,7 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
|
|
| 166 |
|
| 167 |
# --- Build Gradio Interface using Blocks ---
|
| 168 |
with gr.Blocks() as demo:
|
| 169 |
-
gr.Markdown("#
|
| 170 |
gr.Markdown(
|
| 171 |
"""
|
| 172 |
**Instructions:**
|
|
@@ -177,10 +177,12 @@ with gr.Blocks() as demo:
|
|
| 177 |
|
| 178 |
---
|
| 179 |
**Disclaimers:**
|
| 180 |
-
Once clicking on the "submit button, it can take quite some time (
|
| 181 |
-
This space provides a basic setup and is intentionally sub-optimal to encourage you to develop your own, more robust solution. For instance for the delay process of the submit button, a solution could be to cache the answers and submit in a separate action or even to answer the questions in async.
|
| 182 |
"""
|
| 183 |
)
|
|
|
|
|
|
|
|
|
|
| 184 |
|
| 185 |
gr.LoginButton()
|
| 186 |
|
|
@@ -192,6 +194,7 @@ with gr.Blocks() as demo:
|
|
| 192 |
|
| 193 |
run_button.click(
|
| 194 |
fn=run_and_submit_all,
|
|
|
|
| 195 |
outputs=[status_output, results_table]
|
| 196 |
)
|
| 197 |
|
|
|
|
| 10 |
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
|
| 11 |
|
| 12 |
|
| 13 |
+
def run_and_submit_all( profile: gr.OAuthProfile | None, model_type):
|
| 14 |
"""
|
| 15 |
Fetches all questions, runs the BasicAgent on them, submits all answers,
|
| 16 |
and displays the results.
|
|
|
|
| 31 |
|
| 32 |
# 1. Instantiate Agent ( modify this part to create your agent)
|
| 33 |
try:
|
| 34 |
+
agent = FinalAgent(model_type=model_type.upper().replace(' ', ''))
|
| 35 |
except Exception as e:
|
| 36 |
print(f"Error instantiating agent: {e}")
|
| 37 |
return f"Error initializing agent: {e}", None
|
|
|
|
| 166 |
|
| 167 |
# --- Build Gradio Interface using Blocks ---
|
| 168 |
with gr.Blocks() as demo:
|
| 169 |
+
gr.Markdown("# Agent Evaluation Runner")
|
| 170 |
gr.Markdown(
|
| 171 |
"""
|
| 172 |
**Instructions:**
|
|
|
|
| 177 |
|
| 178 |
---
|
| 179 |
**Disclaimers:**
|
| 180 |
+
Once clicking on the "submit" button, it can take quite some time (this is the time for the agent to go through all the questions).
|
|
|
|
| 181 |
"""
|
| 182 |
)
|
| 183 |
+
model_selector = gr.Dropdown(
|
| 184 |
+
choices=["Google", "Ollama", "Hugging Face"], label="LLM provider", info="Which model provider should be used for inference?", interactive=True
|
| 185 |
+
)
|
| 186 |
|
| 187 |
gr.LoginButton()
|
| 188 |
|
|
|
|
| 194 |
|
| 195 |
run_button.click(
|
| 196 |
fn=run_and_submit_all,
|
| 197 |
+
inputs=model_selector,
|
| 198 |
outputs=[status_output, results_table]
|
| 199 |
)
|
| 200 |
|