MariaFilipkowska committed on
Commit
33ca046
·
verified ·
1 Parent(s): bf7e313

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -8
app.py CHANGED
@@ -3,7 +3,8 @@ import gradio as gr
3
  import requests
4
  import pandas as pd
5
  import base64
6
- import ollama
 
7
  from typing import TypedDict, Annotated
8
  from langchain_core.messages import AnyMessage, SystemMessage
9
  from langchain_community.tools import DuckDuckGoSearchRun
@@ -13,10 +14,18 @@ from langgraph.graph import START, StateGraph
13
  from langgraph.graph.message import add_messages
14
  from langgraph.prebuilt import ToolNode, tools_condition
15
 
 
 
16
  # (Keep Constants as is)
17
  # --- Constants ---
18
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
19
 
 
 
 
 
 
 
20
 
21
  def get_file_path(task_id: str, question) -> str:
22
  """Retrieves reference file path."""
@@ -81,13 +90,12 @@ extract_text_tool = Tool(
81
  description="Retrieves text from an image."
82
  )
83
 
84
- # Generate the chat interface, including the tools
85
- # print("Loading LLM...")
86
- # ollama.pull('gemma3:4b')
87
- from transformers import AutoProcessor, AutoModelForImageTextToText
88
-
89
- processor = AutoProcessor.from_pretrained("google/gemma-3-4b-it")
90
- model = AutoModelForImageTextToText.from_pretrained("google/gemma-3-4b-it")
91
 
92
  chat = ChatOllama(model=model, verbose=True)
93
  print(f"Model {chat.model} downloaded!")
 
3
  import requests
4
  import pandas as pd
5
  import base64
6
+
7
+ from dotenv import load_dotenv
8
  from typing import TypedDict, Annotated
9
  from langchain_core.messages import AnyMessage, SystemMessage
10
  from langchain_community.tools import DuckDuckGoSearchRun
 
14
  from langgraph.graph.message import add_messages
15
  from langgraph.prebuilt import ToolNode, tools_condition
16
 
17
+ from huggingface_hub import login, InferenceClient
18
+
19
  # (Keep Constants as is)
20
  # --- Constants ---
21
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
22
 
23
+ load_dotenv()
24
+ login(os.getenv("HUGGINGFACEHUB_API_TOKEN"))
25
+
26
+ model_id = "google/gemma-3-4b-it"
27
+
28
+ client = InferenceClient(model=model_id)
29
 
30
  def get_file_path(task_id: str, question) -> str:
31
  """Retrieves reference file path."""
 
90
  description="Retrieves text from an image."
91
  )
92
 
93
+ model = HfApiModel(
94
+ max_tokens=2096,
95
+ temperature=0.5,
96
+ model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
97
+ custom_role_conversions=None,
98
+ )
 
99
 
100
  chat = ChatOllama(model=model, verbose=True)
101
  print(f"Model {chat.model} downloaded!")