from fastapi import FastAPI
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import requests
from bs4 import BeautifulSoup
app = FastAPI()
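# microsoft/phi-1_5 is a ~1.3B-parameter causal LM, small enough for CPU-only inference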
MODEL_NAME = "microsoft/phi-1_5"
print("Loading model...")
torch.set_num_threads(2)
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    dtype=torch.float32,
    low_cpu_mem_usage=True
)
model.to("cpu")
print("Model loaded!")
# -------- REQUEST SCHEMA --------
class RequestData(BaseModel):
    prompt: str
    history: list = []
    use_search: bool = True
# -------- ROOT ROUTE --------
@app.get("/")
def home():
return {"message": "API is running"}
# -------- TOOL 1: SEARCH --------
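# Scrapes the DuckDuckGo HTML endpoint and returns up to three result URLs for a query.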
def search_links(query):
url = f"https://duckduckgo.com/html/?q={query}"
headers = {"User-Agent": "Mozilla/5.0"}
try:
res = requests.get(url, headers=headers, timeout=10)
soup = BeautifulSoup(res.text, "html.parser")
links = []
for a in soup.select("a.result__a"):
href = a.get("href")
if href:
links.append(href)
return links[:3]
except:
return []
# -------- TOOL 2: OPEN PAGE --------
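# Fetches a URL, strips scripts and styles, and returns up to 2000 characters of page text.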
def extract_page_text(url):
    try:
        res = requests.get(url, timeout=10, headers={"User-Agent": "Mozilla/5.0"})
        soup = BeautifulSoup(res.text, "html.parser")
        for tag in soup(["script", "style"]):
            tag.decompose()
        # Collapse whitespace runs so the 2000-character budget is spent on real text
        text = " ".join(soup.get_text(separator=" ").split())
        return text[:2000]
    except requests.RequestException:
        return ""
# -------- TOOL 3: BROWSE --------
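# Combines the two tools above: search for the query, then pull text from each result page.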
def browse_web(query):
    links = search_links(query)
    contents = []
    for link in links:
        page = extract_page_text(link)
        if page:
            contents.append(page)
    return "\n\n".join(contents[:3])
# -------- MEMORY BUILDER (FIXED) --------
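# Flattens the chat history into a plain-text "User:/Assistant:" transcript ending with the new prompt.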
def build_prompt(prompt, history):
convo = ""
for msg in history:
if isinstance(msg, dict):
if msg.get("role") == "user":
convo += f"User: {msg.get('content')}\n"
elif msg.get("role") == "assistant":
convo += f"Assistant: {msg.get('content')}\n"
convo += f"User: {prompt}\nAssistant:"
return convo
# -------- GENERATION (FIXED OUTPUT) --------
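# Runs the model on CPU and returns only the newly generated text, not the echoed prompt.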
def generate_text(prompt):
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=120,
            temperature=0.7,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id
        )
    # Decode only the newly generated tokens so the prompt is never echoed back
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True).strip()
# -------- AGENT LOOP --------
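# First asks the model whether web search is needed; if it answers YES (and search is enabled),
# the prompt is augmented with scraped web data before the final answer is generated.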
def agent(prompt, history, use_search=True):
    base_prompt = build_prompt(prompt, history)
    decision_prompt = f"""
You are an AI agent.
User question:
{prompt}
Should you search the web? Answer YES or NO.
"""
    decision = generate_text(decision_prompt).lower()
    if use_search and "yes" in decision:
        web_data = browse_web(prompt)
        final_prompt = f"""
You are an AI assistant with access to web data.
Conversation:
{base_prompt}
Web Data:
{web_data}
Answer clearly and accurately:
"""
    else:
        final_prompt = base_prompt
    return generate_text(final_prompt)
# -------- API ENDPOINT --------
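# POST /generate accepts a prompt, optional chat history, and a use_search flag, and returns the agent's reply.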
@app.post("/generate")
def generate(data: RequestData):
    response = agent(
        prompt=data.prompt,
        history=data.history,
        use_search=data.use_search
    )
    return {"response": response}