Arno' Francesco (GDS DS&G) committed on
Commit
9518f95
·
1 Parent(s): cf13a53

use gemini as llm

Browse files
Files changed (4) hide show
  1. .env +1 -0
  2. .gitignore +1 -0
  3. app.py +22 -7
  4. utils.py +40 -0
.env ADDED
@@ -0,0 +1 @@
 
 
1
+ GEMINI_API_KEY=<REDACTED — SECURITY: a live Gemini API key was committed here. Rotate/revoke this key immediately; note this same commit adds .gitignore but does NOT ignore .env, so the secret is in git history. Load keys from the deployment environment or a secrets manager instead.>
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ test.py
app.py CHANGED
@@ -4,13 +4,22 @@ import requests
4
  import inspect
5
  import pandas as pd
6
  from smolagents import CodeAgent, HfApiModel, LiteLLMModel, Tool, DuckDuckGoSearchTool
 
7
  from agent_tools import calculator_tool, visit_webpage
8
  import numpy as np
9
  import time
10
  import datetime
11
  from smolagents import InferenceClientModel, TransformersModel
 
 
12
 
 
13
 
 
 
 
 
 
14
 
15
 
16
  # (Keep Constants as is)
@@ -20,11 +29,11 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
20
  # --- Basic Agent Definition ---
21
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
22
  class BasicAgent:
23
- def __init__(self):
24
 
25
  print("BasicAgent initialized.")
26
  # Initialize the Hugging Face model
27
- self.model = HfApiModel()
28
  search_tool = DuckDuckGoSearchTool()
29
 
30
  self.web_agent = CodeAgent(
@@ -35,9 +44,6 @@ class BasicAgent:
35
  description="Runs web searches for you.",
36
  )
37
 
38
- self.check_model = TransformersModel(model_id="HuggingFaceTB/SmolLM-135M-Instruct")
39
-
40
-
41
  def check_final_answer(self, final_answer, agent_memory):
42
  prompt = f"""Here is a user-given task and the agent steps: {agent_memory.get_succinct_steps()}. \
43
  Report your thoughts, and finish your answer with the following template:
@@ -57,8 +63,15 @@ class BasicAgent:
57
 
58
  #model = InferenceClientModel()
59
 
60
- response = self.check_model(messages=messages)
61
- return response
 
 
 
 
 
 
 
62
 
63
  def __call__(self, question: str) -> str:
64
  print(f"Agent received question (first 50 chars): {question[:50]}...")
@@ -135,6 +148,8 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
135
  for item in questions_data:
136
  task_id = item.get("task_id")
137
  question_text = item.get("question")
 
 
138
  if not task_id or question_text is None:
139
  print(f"Skipping item with missing task_id or question: {item}")
140
  continue
 
4
  import inspect
5
  import pandas as pd
6
  from smolagents import CodeAgent, HfApiModel, LiteLLMModel, Tool, DuckDuckGoSearchTool
7
+ import litellm
8
  from agent_tools import calculator_tool, visit_webpage
9
  import numpy as np
10
  import time
11
  import datetime
12
  from smolagents import InferenceClientModel, TransformersModel
13
+ from utils import download_file
14
+ from dotenv import load_dotenv
15
 
16
+ load_dotenv()
17
 
18
+ llm_model = LiteLLMModel(
19
+ model_id="gemini/gemini-2.0-flash", # you can see other model names here: https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models. It is important to prefix the name with "gemini/"
20
+ api_key=os.getenv("GEMINI_API_KEY"),
21
+ max_tokens=8192
22
+ )
23
 
24
 
25
  # (Keep Constants as is)
 
29
  # --- Basic Agent Definition ---
30
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
31
  class BasicAgent:
32
+ def __init__(self, model):
33
 
34
  print("BasicAgent initialized.")
35
  # Initialize the Hugging Face model
36
+ self.model = model
37
  search_tool = DuckDuckGoSearchTool()
38
 
39
  self.web_agent = CodeAgent(
 
44
  description="Runs web searches for you.",
45
  )
46
 
 
 
 
47
  def check_final_answer(self, final_answer, agent_memory):
48
  prompt = f"""Here is a user-given task and the agent steps: {agent_memory.get_succinct_steps()}. \
49
  Report your thoughts, and finish your answer with the following template:
 
63
 
64
  #model = InferenceClientModel()
65
 
66
+ response = litellm.completion(
67
+ model="gemini/gemini-2.0-flash",
68
+ messages=messages,
69
+ )
70
+
71
+ # Extract the response content
72
+ content = response.get('choices', [{}])[0].get('message', {}).get('content')
73
+
74
+ return content
75
 
76
  def __call__(self, question: str) -> str:
77
  print(f"Agent received question (first 50 chars): {question[:50]}...")
 
148
  for item in questions_data:
149
  task_id = item.get("task_id")
150
  question_text = item.get("question")
151
+ if len(item.get("file_name")):
152
+ download_file(item)
153
  if not task_id or question_text is None:
154
  print(f"Skipping item with missing task_id or question: {item}")
155
  continue
utils.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import requests

# Base URL of the course scoring service that serves task attachments.
DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"

# File types the scoring API is known to serve for task records.
# Anything else is skipped (the original silently ignored them; we log it).
_SUPPORTED_EXTENSIONS = ('.mp3', '.png', '.py', '.xlsx')


def download_file(record):
    """Download the attachment for a task record and save it locally.

    Fetches ``{DEFAULT_API_URL}/files/{task_id}`` and streams it to a file
    named ``record['file_name']`` in the current working directory.

    Args:
        record: A task dict with at least ``'task_id'`` and ``'file_name'``
            keys (as returned by the scoring API's questions endpoint).

    Returns:
        None. Success/failure is reported via ``print`` — callers do not
        inspect a return value.
    """
    task_id = record['task_id']
    filename = record['file_name']
    url = f"{DEFAULT_API_URL}/files/{task_id}"

    if not filename.endswith(_SUPPORTED_EXTENSIONS):
        # Original code fell through silently here; make the skip visible.
        print(f"Unsupported file type for {filename}; skipping download.")
        return

    # The original duplicated this entire block for audio vs. image/code/xlsx
    # with identical logic — collapsed into one path. A timeout is added so a
    # stalled connection cannot hang the agent run indefinitely.
    response = requests.get(url, stream=True, timeout=30)

    if response.status_code == 200:
        # Stream to disk in 1 KiB chunks; skip keep-alive (empty) chunks.
        with open(filename, 'wb') as f:
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
        print(f"File downloaded successfully and saved as {filename}")
    else:
        print(f"Failed to download file. Status code: {response.status_code}")