Update app.py
app.py
CHANGED
@@ -5,6 +5,12 @@ import inspect
 import pandas as pd
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 import torch
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+from langchain_community.llms import HuggingFacePipeline
+from tools import tools
+from langchain_core.messages import HumanMessage
+from langgraph.prebuilt import ToolExecutor, chat_agent_executor
+
 
 # (Keep Constants as is)
 # --- Constants ---
@@ -23,31 +29,26 @@ class BasicAgent:
 
 class ZephyrAgent:
     def __init__(self):
-
-        self.headers = {
-            "Authorization": f"Bearer {os.getenv('HF_TOKEN')}"
-        }
-        print("ZephyrAPI initialized using Inference API.")
+        print("Initializing local Zephyr model with tools...")
 
-
-
-
-
-
-            "max_new_tokens": 256,
-            "temperature": 0.7,
-            "top_p": 0.9,
-        }
-    }
+        tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
+        model = AutoModelForCausalLM.from_pretrained("HuggingFaceH4/zephyr-7b-beta", torch_dtype=torch.float16, device_map="auto")
+
+        pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512)
+        llm = HuggingFacePipeline(pipeline=pipe)
 
+        # Wrap tools
+        tool_executor = ToolExecutor(tools)
+        self.agent_executor = chat_agent_executor.create_chat_agent_executor(llm=llm, tools=tools)
+
+    def __call__(self, question: str) -> str:
         try:
-
-            response.
-
-            return result[0]["generated_text"].split("<|assistant|>")[-1].strip()
+            message = HumanMessage(content=question)
+            response = self.agent_executor.invoke({"messages": [message]})
+            return response.content
         except Exception as e:
-            print(f"
-            return "⚠️
+            print(f"Tool-augmented ZephyrAgent error: {e}")
+            return "⚠️ Agent failed to answer due to tool or model error."
 
 def run_and_submit_all( profile: gr.OAuthProfile | None):
     """
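
For anyone who wants to exercise the new class locally, a minimal smoke test might look like the sketch below. It is not part of this commit: it assumes the Space's tools module is importable, that no HF_TOKEN is needed any more (the model now runs locally rather than through the Inference API), and that a GPU with enough memory for zephyr-7b-beta in float16 is available.

    # Hypothetical smoke test, not part of this commit.
    from app import ZephyrAgent

    agent = ZephyrAgent()            # downloads/loads zephyr-7b-beta locally
    print(agent("What is 2 + 2?"))   # routes the question through the agent executor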
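One detail worth double-checking in __call__: LangGraph prebuilt executors conventionally return the graph state, a dict whose "messages" list accumulates the conversation, rather than a single message object. If that holds for the langgraph version pinned in this Space, response.content would raise AttributeError, and the answer would instead live on the last message in the state. A hedged sketch of the usual extraction:

    # Sketch, assuming invoke() returns a LangGraph state dict of the usual
    # shape {"messages": [...]}; verify against the pinned langgraph version.
    def extract_answer(state: dict) -> str:
        """Return the content of the final (assistant) message in the state."""
        return state["messages"][-1].content

    # Then, inside ZephyrAgent.__call__:
    #     response = self.agent_executor.invoke({"messages": [message]})
    #     return extract_answer(response)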
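Note also that ToolExecutor and chat_agent_executor come from early langgraph.prebuilt; depending on the version pinned in requirements.txt, those names may since have been deprecated or removed. More recent releases document create_react_agent as the prebuilt entry point, though it expects a chat model that supports tool binding, which a raw text-generation HuggingFacePipeline may not satisfy without a chat wrapper. A sketch under those assumptions:

    # Hedged alternative for newer langgraph releases. Assumes `llm` can bind
    # tools (e.g. via a chat-model wrapper); a plain HuggingFacePipeline may not.
    from langgraph.prebuilt import create_react_agent

    def build_agent(llm, tools):
        # Returns a compiled graph that is invoked with a messages-style state.
        return create_react_agent(llm, tools)

    def ask(agent_graph, question: str) -> str:
        state = agent_graph.invoke({"messages": [("user", question)]})
        return state["messages"][-1].content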