Raffaele Terribile
committed on
Modify the app to use a local model
app.py
CHANGED
@@ -5,10 +5,27 @@ import requests
 import inspect
 import pandas as pd
 
-
-
-
+# =============================================================================
+# CHANGES MADE TO FIX THE "generate" NOT FOUND ERROR:
+#
+# ORIGINAL PROBLEM:
+# - The Transformers pipeline is not directly compatible with smolagents
+# - CodeAgent expects a specific interface that pipeline does not implement
+# - The "generate" error occurred because smolagents looked for methods that were not there
+#
+# IMPLEMENTED SOLUTION:
+# - Created a SimpleLocalModel class that acts as a wrapper
+# - It implements the __call__() interface that smolagents expects
+# - It handles message conversion and response generation
+# - Multiple fallbacks: local -> remote -> fixed
+# =============================================================================
+
+from smolagents import CodeAgent, InferenceClientModel, VisitWebpageTool, PythonInterpreterTool, WebSearchTool, WikipediaSearchTool, FinalAnswerTool, Tool, tool
+# Imports for local models (SOLUTION to the "generate" error):
+from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
 from litellm import LiteLLM
+import threading
+import time
 
 # (Keep Constants as is)
 # --- Constants ---
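The "message conversion" the banner above refers to is the heart of the fix: smolagents hands the model a chat-style list of message dicts, while a Transformers text-generation pipeline expects a single prompt string. A minimal sketch of that conversion, mirroring the last-message strategy used by the SimpleLocalModel wrapper in the next hunk (the helper name is illustrative, not part of the commit):

# Convert a smolagents-style message list into a single prompt string
# (sketch; same logic as SimpleLocalModel.__call__ below).
def messages_to_prompt(messages) -> str:
    if isinstance(messages, list) and messages:
        last = messages[-1]
        return last.get("content", "") if isinstance(last, dict) else str(last)
    return str(messages)

print(messages_to_prompt([{"role": "user", "content": "Hi"}]))  # -> Hi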
@@ -25,11 +42,63 @@ def invert_sentence(sentence: str) -> str:
     """
     return sentence[::-1]
 
+# Simplified wrapper for local models
+# NEW APPROACH: this class fixes the "generate" error by providing a
+# compatible interface between a Transformers pipeline and smolagents
+class SimpleLocalModel:
+    """Simple wrapper for local Transformers models."""
+
+    def __init__(self, model_name="gpt2"):
+        self.model_name = model_name
+        self.pipeline = None
+        self._load_model()
+
+    def _load_model(self):
+        """Load the local model."""
+        try:
+            print(f"Loading local model: {self.model_name}")
+            self.pipeline = pipeline(
+                "text-generation",
+                model=self.model_name,
+                # device=-1,  # Use the CPU
+                return_full_text=False  # Return only the generated text
+            )
+            print(f"✅ Model {self.model_name} loaded")
+        except Exception as e:
+            print(f"❌ Error loading model: {e}")
+            raise
+
+    def __call__(self, messages, **kwargs):
+        """Generate a response compatible with smolagents."""
+        try:
+            # Extract the prompt
+            if isinstance(messages, list) and messages:
+                prompt = messages[-1].get("content", "") if isinstance(messages[-1], dict) else str(messages[-1])
+            else:
+                prompt = str(messages)
+
+            if not prompt.strip():
+                return "Sorry, I did not receive a question."
+
+            # Generate the response
+            result = self.pipeline(prompt, max_new_tokens=100, do_sample=True, temperature=0.7)
+
+            if result and len(result) > 0:
+                answer = result[0].get("generated_text", "").strip()
+                return answer if answer else "I could not generate a response."
+            else:
+                return "Error while generating the response."
+
+        except Exception as e:
+            print(f"Generation error: {e}")
+            return f"Error: {str(e)}"
+
 # --- First Agent Definition ---
 # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
 class FirstAgent:
     ### First Agent is the first attempt to develop an agent for the course. ###
     def __init__(self):
+        # ORIGINAL CODE, COMMENTED OUT (it caused the "generate" error):
         # # Use a free Hugging Face model
         # token = os.getenv(os.getenv("TOKEN_NAME"))
         # os.environ["HF_TOKEN"] = token
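Because the wrapper only needs __call__, it can be smoke-tested on its own before wiring it into CodeAgent. A minimal sketch, assuming app.py is importable from the working directory (importing it builds the Gradio UI but does not launch it) and that the small default gpt2 checkpoint can be downloaded; the commit itself loads microsoft/Phi-4-mini-reasoning the same way:

from app import SimpleLocalModel

model = SimpleLocalModel("gpt2")  # small checkpoint for a quick check

# smolagents passes chat-style message lists; the wrapper reads the last
# message's "content" and returns a plain string.
messages = [{"role": "user", "content": "The capital of France is"}]
print(model(messages))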
@@ -37,52 +106,82 @@ class FirstAgent:
         # token=token
         # )
 
-        #
+        # ORIGINAL CODE, COMMENTED OUT (pipeline-based approach, not compatible):
+        # # Configuration with multiple fallbacks
+        # model = None
+        # # Try 1: local model via Transformers
+        # try:
+        #     model_id = "microsoft/Phi-4-mini-reasoning"
+        #     tokenizer = AutoTokenizer.from_pretrained(model_id)
+        #     model = AutoModelForCausalLM.from_pretrained(model_id)  # ~500MB
+        #     model = pipeline(
+        #         task="text-generation",
+        #         tokenizer=tokenizer,
+        #         model=model
+        #     )
+        #     print(f"Using local {model_id} model")
+        # except Exception as e:
+        #     print(f"Local model failed: {e}")
+        #     # Try 2: free remote model
+        #     try:
+        #         model = LiteLLM(
+        #             model_id="groq/mixtral-8x7b-32768"  # Free with registration
+        #         )
+        #         print("Using Groq remote model")
+        #     except Exception as ex:
+        #         print(f"Remote model failed: {ex}")
+        #         raise Exception("No working model configuration found")
+
+        # NEW WORKING CODE:
+        # Configuration with fallbacks for local models
         model = None
 
-        # Try 1: Local model
         try:
-            model_id = "microsoft/Phi-4-mini-reasoning"
-            tokenizer = AutoTokenizer.from_pretrained(model_id)
-            model = AutoModelForCausalLM.from_pretrained(model_id)  # ~500MB
-            model = pipeline(
-                task="text-generation",
-                tokenizer=tokenizer,
-                model=model
-            )
-            print(f"Using local {model_id} model")
+        # Try 1: simplified local model
+            print("🔄 Attempt 1: local model microsoft/Phi-4-mini-reasoning")
+            model = SimpleLocalModel("microsoft/Phi-4-mini-reasoning")
+            print("✅ Using local model microsoft/Phi-4-mini-reasoning")
         except Exception as e:
-            print(f"Local model failed: {e}")
+            print(f"❌ Local model failed: {e}")
 
-        # Try 2: Remote model
+        # Try 2: remote model (only if the local model failed)
         try:
-            model = LiteLLM(
-                model_id="groq/mixtral-8x7b-32768"  # Free with registration
-            )
-            print("Using Groq remote model")
+            if model is None:
+                print("🔄 Attempt 2: Groq remote model")
+                model = LiteLLM(model="groq/mixtral-8x7b-32768")
+                print("✅ Using Groq remote model")
         except Exception as ex:
-            print(f"Remote model failed: {ex}")
-            raise Exception("No working model configuration found")
+            print(f"❌ Remote model failed: {ex}")
+
+            # Try 3: final fallback - fixed response
+            class FallbackModel:
+                def __call__(self, messages, **kwargs):
+                    return "I am a simplified agent. The AI model is not available right now."
+
+            model = FallbackModel()
+            print("⚠️ Using fallback model")
 
+        # Initialize the agent
         self.agent = CodeAgent(
             model=model,
             tools=[
-                # DuckDuckGoSearchTool(),
-                # GoogleSearchTool(),
                 WebSearchTool(),
                 PythonInterpreterTool(),
                 WikipediaSearchTool(),
-                VisitWebpageTool()
-                # FinalAnswerTool #,
-                # Tool(name="invert_sentence", func=invert_sentence, description="Inverts the order of characters in a sentence.")
+                VisitWebpageTool()
             ]
         )
-        print("FirstAgent
+        print("FirstAgent initialized.")
+
     def __call__(self, question: str) -> str:
-        print(f"Agent
-
-
-
+        print(f"Agent received question (first 50 chars): {question[:50]}...")
+        try:
+            answer = self.agent.run(question)
+            print(f"Agent returning answer: {str(answer)[:100]}...")
+            return str(answer)
+        except Exception as e:
+            print(f"Agent error: {e}")
+            return f"Agent error: {str(e)}"
 
 # --- Basic Agent Definition ---
 # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
@@ -198,8 +297,8 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
             answers_payload.append({"task_id": task_id, "submitted_answer": submitted_answer})
             results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": submitted_answer})
         except Exception as e:
-
-
+            print(f"Error running agent on task {task_id}: {e}")
+            results_log.append({"Task ID": task_id, "Question": question_text, "Submitted Answer": f"AGENT ERROR: {e}"})
 
     if not answers_payload:
         print("Agent did not produce any answers to submit.")
@@ -307,4 +406,4 @@ if __name__ == "__main__":
     print("-"*(60 + len(" App Starting ")) + "\n")
 
     print("Launching Gradio Interface for Basic Agent Evaluation...")
-    demo.launch(debug=True, share=False)
+    demo.launch(debug=True, share=False)
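To exercise the full fallback chain end to end, the agent can be driven directly, again assuming app.py is importable and the Space's dependencies (smolagents, transformers, litellm, gradio) are installed; the first run downloads the local model, which can take a while:

from app import FirstAgent

agent = FirstAgent()            # picks a model via the local -> remote -> fixed fallback chain
print(agent("What is 2 + 2?"))  # __call__ wraps self.agent.run() and always returns a string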