AkylaiBva committed on
Commit
1168c14
·
verified ·
1 Parent(s): 6ad0d6f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -35
app.py CHANGED
@@ -14,6 +14,7 @@ from transformers import (
14
  )
15
  import torch
16
  from typing import Optional
 
17
 
18
  # (Keep Constants as is)
19
  # --- Constants ---
@@ -32,42 +33,26 @@ from tools import (
32
  query_video
33
  )
34
 
 
35
 
36
class BasicAgent:
    """Agent that loads and runs a local causal language model.

    Supported `model_choice` values:
      - "bertin-gpt-j-6B": bilingual (Spanish/English) 6B GPT-J variant.
      - "redpajama-3B": general-purpose 3B chat model.

    Raises:
        ValueError: if `model_choice` is not one of the supported names.
    """

    def __init__(self, model_choice="bertin-gpt-j-6B"):
        self.model_choice = model_choice
        # Use GPU when available; fp16 generation on CPU for 3-6B models is very slow.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"

        if model_choice == "bertin-gpt-j-6B":
            # Bilingual (Spanish/English) 6B model
            self.model = AutoModelForCausalLM.from_pretrained(
                "bertin-project/bertin-gpt-j-6B",
                torch_dtype=torch.float16,  # Reduce memory usage
                low_cpu_mem_usage=True,
            ).to(self.device)
            self.tokenizer = AutoTokenizer.from_pretrained("bertin-project/bertin-gpt-j-6B")
        elif model_choice == "redpajama-3B":
            # General-purpose 3B model
            self.model = AutoModelForCausalLM.from_pretrained(
                "togethercomputer/RedPajama-INCITE-Chat-3B-v1",
                torch_dtype=torch.float16,
            ).to(self.device)
            self.tokenizer = AutoTokenizer.from_pretrained(
                "togethercomputer/RedPajama-INCITE-Chat-3B-v1"
            )
        else:
            raise ValueError(f"Unsupported model: {model_choice}")

    def __call__(self, question):
        """Generate an answer to `question` via sampling (temperature 0.7,
        up to 100 new tokens).

        NOTE(review): for GPT-style models the decoded output includes the
        prompt text itself; callers appear to post-process the raw result.
        """
        inputs = self.tokenizer(question, return_tensors="pt").to(self.device)
        outputs = self.model.generate(
            **inputs,
            max_new_tokens=100,
            do_sample=True,
            temperature=0.7,
        )
        return self.tokenizer.decode(outputs[0], skip_special_tokens=True)
 
 
 
 
71
 
72
  def run_and_submit_all( profile: gr.OAuthProfile | None):
73
  """
@@ -90,7 +75,7 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
90
 
91
  # 1. Instantiate Agent ( modify this part to create your agent)
92
  try:
93
- agent = BasicAgent(model_choice="bertin-gpt-j-6B")
94
  except Exception as e:
95
  print(f"Error instantiating agent: {e}")
96
  return f"Error initializing agent: {e}", None
@@ -133,7 +118,7 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
133
 
134
  submitted_answer = "def_answer"
135
  try:
136
- submitted_answer_raw = agent(question_text)
137
 
138
  if isinstance(submitted_answer_raw, list) and 'generated_text' in submitted_answer_raw[0]:
139
  submitted_answer = submitted_answer_raw[0]['generated_text'].strip()
 
14
  )
15
  import torch
16
  from typing import Optional
17
+ from huggingface_hub import InferenceClient
18
 
19
  # (Keep Constants as is)
20
  # --- Constants ---
 
33
  query_video
34
  )
35
 
36
+
37
 
38
class BasicAgent:
    """Minimal GAIA agent that answers text questions through the
    Hugging Face Inference API (no local model weights are loaded).

    Attachments are not handled yet; questions starting with "." are treated
    as reversed text and normalized before prompting.
    """

    def __init__(self):
        # Remote inference client; the model is selected per text_generation call.
        self.model = InferenceClient()

    def __call__(self, question, question_attach):
        """Answer `question`; `question_attach` is the attachment name ("" if none).

        Returns the model's raw text response, or a fixed placeholder string
        when an attachment is present.
        """
        # BUG FIX: original line was `print("Q: "+question="\nattachment: "...)`
        # — `=` in place of `+` is a syntax error; concatenate instead.
        print("Q: " + question + "\nattachment: " + question_attach)
        if question_attach != "":
            return "attachment analysis is not implemented yet."
        # Some GAIA level-1 questions arrive as reversed strings; a leading
        # "." marks them (it is the reversed trailing period).
        if question.startswith("."):
            print("it is reversed")
            question = question[::-1]
            print("normalized q:" + question)
        prompt = f"""Answer this GAIA level 1 question:
Question: {question}
Answer:"""
        response = self.model.text_generation(
            prompt, model="mistralai/Mistral-7B-Instruct-v0.1"
        )
        return response
56
 
57
  def run_and_submit_all( profile: gr.OAuthProfile | None):
58
  """
 
75
 
76
  # 1. Instantiate Agent ( modify this part to create your agent)
77
  try:
78
+ agent = BasicAgent()
79
  except Exception as e:
80
  print(f"Error instantiating agent: {e}")
81
  return f"Error initializing agent: {e}", None
 
118
 
119
  submitted_answer = "def_answer"
120
  try:
121
+ submitted_answer_raw = agent(question_text, question_attach)
122
 
123
  if isinstance(submitted_answer_raw, list) and 'generated_text' in submitted_answer_raw[0]:
124
  submitted_answer = submitted_answer_raw[0]['generated_text'].strip()