mjtaherikia commited on
Commit
407a734
·
1 Parent(s): 81917a3

New image tool added.

Browse files
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ .env
app.py CHANGED
@@ -4,15 +4,41 @@ import requests
4
  import inspect
5
  import pandas as pd
6
 
 
 
 
 
 
 
7
  # (Keep Constants as is)
8
  # --- Constants ---
9
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
10
 
11
  # --- Basic Agent Definition ---
12
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
 
13
  class BasicAgent:
14
  def __init__(self):
15
  print("BasicAgent initialized.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  def __call__(self, question: str) -> str:
17
  print(f"Agent received question (first 50 chars): {question[:50]}...")
18
  fixed_answer = "This is a default answer."
 
4
  import inspect
5
  import pandas as pd
6
 
7
+ import requests
8
+ from smolagents import OpenAIServerModel
9
+ from smolagents import CodeAgent, DuckDuckGoSearchTool, VisitWebpageTool, FinalAnswerTool, ToolCallingAgent
10
+
11
+ from modules.tools import image_description
12
+
13
  # (Keep Constants as is)
14
  # --- Constants ---
15
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
16
 
17
  # --- Basic Agent Definition ---
18
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
19
+
20
  class BasicAgent:
21
  def __init__(self):
22
  print("BasicAgent initialized.")
23
+
24
+ self.model = OpenAIServerModel(
25
+ model_id="gemini-2.5-flash-preview-05-20",
26
+ # Google Gemini OpenAI-compatible API base URL
27
+ api_base="https://generativelanguage.googleapis.com/v1beta/openai/",
28
+ api_key=GEMINI_API_KEY,
29
+ )
30
+
31
+ self.agent = CodeAgent (
32
+ tools = [DuckDuckGoSearchTool(), VisitWebpageTool(),
33
+ image_description,
34
+ # my_custom_tool_class()
35
+ ],
36
+ model = self.model,
37
+ max_steps = 10,
38
+ verbosity_level = 2,
39
+ additional_authorized_imports = ["datetime"]
40
+ )
41
+
42
  def __call__(self, question: str) -> str:
43
  print(f"Agent received question (first 50 chars): {question[:50]}...")
44
  fixed_answer = "This is a default answer."
modules/__init__.py ADDED
File without changes
modules/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (227 Bytes). View file
 
modules/__pycache__/prompts.cpython-312.pyc ADDED
Binary file (1.35 kB). View file
 
modules/__pycache__/tools.cpython-312.pyc ADDED
Binary file (1.54 kB). View file
 
modules/prompts.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Prompt template consumed by the image_description tool; the '{question}'
# placeholder is substituted via str.format before the image is attached.
image_description_prompt = """
Analyze this image in detail to help answer the following question: '{question}'

Provide a structured description with the following sections:

1. **Key Objects & Entities:** List all primary objects, people, text, symbols, and entities. Be specific about quantities, colors, and spatial relationships.
2. **Visual Context & Setting:** Describe the environment, background, layout, and any relevant aesthetic or stylistic elements (e.g., "a vintage infographic," "a modern UI screenshot").
3. **Data Presentation (if applicable):** If the image contains data, describe the chart type, axes labels, units, data trends, and approximate values. Ignore this section if not applicable.
4. **Textual Content:** Transcribe all visible text exactly as it appears, including labels, headings, and fine print.
5. **Relevance to Question:** Based on the above, briefly state which visual elements are most directly relevant to answering the user's question.

Your description must be factual, concise, and objective. Avoid speculation and interpretation not supported by visual evidence.
"""
modules/tools.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+ from google import genai
3
+ from google.genai import types
4
+ from smolagents import tool
5
+ from modules.prompts import image_description_prompt
6
+
7
@tool
def image_description(question: str, image_url: str) -> str:
    """
    Receives an image URL, analyzes the image with Gemini, and returns a
    structured description tailored to answering the given question.

    Args:
        question: a question about the given image
        image_url: the URL of the image which should be used as a reference
            for answering the question.

    Returns:
        The model's textual description of the image, framed by the
        image_description_prompt template.

    Raises:
        requests.HTTPError: if the image download returns an error status.
    """
    # Fetch the image; time out and fail fast on HTTP errors rather than
    # silently forwarding an error page's bytes to the model.
    resp = requests.get(image_url, timeout=30)
    resp.raise_for_status()

    # Prefer the server-reported content type (sans charset parameters);
    # fall back to JPEG, the previous hard-coded assumption.
    mime_type = resp.headers.get("Content-Type", "image/jpeg").split(";")[0].strip()
    image = types.Part.from_bytes(data=resp.content, mime_type=mime_type)

    # Client reads its API key from the GEMINI_API_KEY environment variable.
    client = genai.Client()
    response = client.models.generate_content(
        model="gemini-2.5-flash",
        contents=[image_description_prompt.format(question=question), image],
    )

    return response.text
requirements.txt CHANGED
@@ -1,2 +1,6 @@
1
  gradio
2
- requests
 
 
 
 
 
1
  gradio
2
+ gradio[oauth]
3
+ requests
4
+ smolagents
5
+ google-api-python-client
6
+ google-genai
tests/genai.ipynb ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "id": "ed655f3a",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "from google import genai\n",
11
+ "\n",
12
+ "# The client gets the API key from the environment variable `GEMINI_API_KEY`.\n",
13
+ "client = genai.Client()\n",
14
+ "\n",
15
+ "response = client.models.generate_content(\n",
16
+ " model=\"gemini-2.5-flash\", contents=\"write a poem.\"\n",
17
+ ")\n",
18
+ "print(response.text)"
19
+ ]
20
+ },
21
+ {
22
+ "cell_type": "code",
23
+ "execution_count": null,
24
+ "id": "133682b5",
25
+ "metadata": {},
26
+ "outputs": [],
27
+ "source": [
28
+ "from google import genai\n",
29
+ "from google.genai import types\n",
30
+ "import requests\n",
31
+ "\n",
32
+ "@tool\n",
33
+ "def image_description(image_url: str, image_description_prompt: str) -> str:\n",
34
+ " \"\"\"\n",
35
+ " Receives an image URL and analyzes it first, then returns what is needed based on the prompt.\n",
36
+ " Args:\n",
37
+ " image_url: the url of image which should be described.\n",
38
+ " image_description_prompt: a reference prompt which control model output.\n",
39
+ " \"\"\"\n",
40
+ " image_bytes = requests.get(image_url).content\n",
41
+ " image = types.Part.from_bytes(\n",
42
+ " data=image_bytes, mime_type=\"image/jpeg\"\n",
43
+ " )\n",
44
+ " \n",
45
+ " client = genai.Client()\n",
46
+ " response = client.models.generate_content(\n",
47
+ " model=\"gemini-2.5-flash\",\n",
48
+ " contents=[image_description_prompt, image],\n",
49
+ " )\n",
50
+ "\n",
51
+ " return response.text"
52
+ ]
53
+ },
54
+ {
55
+ "cell_type": "code",
56
+ "execution_count": null,
57
+ "id": "bc350e9e",
58
+ "metadata": {},
59
+ "outputs": [],
60
+ "source": [
61
+ "from google import genai\n",
62
+ "from google.genai import types\n",
63
+ "\n",
64
+ "import requests\n",
65
+ "\n",
66
+ "image_path = \"https://goo.gle/instrument-img\"\n",
67
+ "image_bytes = requests.get(image_path).content\n",
68
+ "image = types.Part.from_bytes(\n",
69
+ " data=image_bytes, mime_type=\"image/jpeg\"\n",
70
+ ")\n",
71
+ "\n",
72
+ "client = genai.Client()\n",
73
+ "\n",
74
+ "response = client.models.generate_content(\n",
75
+ " model=\"gemini-2.5-flash\",\n",
76
+ " contents=[\"What is this image?\", image],\n",
77
+ ")\n",
78
+ "\n",
79
+ "print(response.text)"
80
+ ]
81
+ }
82
+ ],
83
+ "metadata": {
84
+ "kernelspec": {
85
+ "display_name": ".venv",
86
+ "language": "python",
87
+ "name": "python3"
88
+ },
89
+ "language_info": {
90
+ "codemirror_mode": {
91
+ "name": "ipython",
92
+ "version": 3
93
+ },
94
+ "file_extension": ".py",
95
+ "mimetype": "text/x-python",
96
+ "name": "python",
97
+ "nbconvert_exporter": "python",
98
+ "pygments_lexer": "ipython3",
99
+ "version": "3.12.3"
100
+ }
101
+ },
102
+ "nbformat": 4,
103
+ "nbformat_minor": 5
104
+ }