Spaces: Build error
Update app.py
app.py
CHANGED
@@ -13,17 +13,28 @@ import gradio as gr
 from transformers import TextIteratorStreamer, AutoModelForSeq2SeqLM, AutoTokenizer, AutoConfig
 from huggingface_hub import InferenceClient
 
+import logging
+
+# Set up logging
+logging.basicConfig(level=logging.DEBUG)
+logger = logging.getLogger(__name__)
+
 # Define device and load model and tokenizer
 DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 MODEL_NAME = "microsoft/Phi-3-mini-4k-instruct"
 
+# Update transformers library
+!pip install --upgrade transformers
+
 # Load model and tokenizer, with specific handling for the Phi-3 model
 try:
+    logger.debug("Attempting to load the model and tokenizer")
     config = AutoConfig.from_pretrained(MODEL_NAME)
     model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME, config=config).to(DEVICE)
     tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+    logger.debug("Model and tokenizer loaded successfully")
 except ValueError as e:
-
+    logger.error(f"Error loading model: {e}")
     # Fallback to using InferenceClient from Hugging Face Hub
     client = InferenceClient(model=MODEL_NAME)
     model = None
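Two notes on this hunk, both relevant to the build error flagged above. `!pip install --upgrade transformers` is notebook shell syntax; in a plain `app.py` it is a SyntaxError, which on its own would break the Space at startup (on Spaces, dependency upgrades belong in `requirements.txt`, see the sketch at the end of this diff). Separately, Phi-3 is a decoder-only model, so `AutoModelForSeq2SeqLM.from_pretrained` raises a ValueError for its config and execution always falls through to the `InferenceClient` branch. A minimal sketch of loading it directly, not part of this commit (the fp16 choice is an assumption):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "microsoft/Phi-3-mini-4k-instruct"
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Phi-3 resolves under the causal-LM auto class, not the seq2seq one.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    torch_dtype=torch.float16 if DEVICE.type == "cuda" else torch.float32,  # assumption
).to(DEVICE)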
@@ -65,6 +76,7 @@ def extract_text_from_webpage(html_content):
 
 # Function to perform a Google search and return the results
 def search(term, num_results=2, lang="en", timeout=5, safe="active", ssl_verify=None):
+    logger.debug(f"Starting search for term: {term}")
     escaped_term = urllib.parse.quote_plus(term)
     start = 0
     all_results = []
@@ -72,46 +84,52 @@ def search(term, num_results=2, lang="en", timeout=5, safe="active", ssl_verify=
 
     with requests.Session() as session:
         while start < num_results:
-            … (36 lines of the previous request-and-parse loop; not captured in this view)
+            try:
+                resp = session.get(
+                    url="https://www.google.com/search",
+                    headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0"},
+                    params={
+                        "q": term,
+                        "num": num_results - start,
+                        "hl": lang,
+                        "start": start,
+                        "safe": safe,
+                    },
+                    timeout=timeout,
+                    verify=ssl_verify,
+                )
+                resp.raise_for_status()
+                soup = BeautifulSoup(resp.text, "html.parser")
+                result_block = soup.find_all("div", attrs={"class": "g"})
+                if not result_block:
+                    start += 1
+                    continue
+                for result in result_block:
+                    link = result.find("a", href=True)
+                    if link:
+                        link = link["href"]
+                        try:
+                            webpage = session.get(link, headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0"})
+                            webpage.raise_for_status()
+                            visible_text = extract_text_from_webpage(webpage.text)
+                            if len(visible_text) > max_chars_per_page:
+                                visible_text = visible_text[:max_chars_per_page] + "..."
+                            all_results.append({"link": link, "text": visible_text})
+                        except requests.exceptions.RequestException as e:
+                            logger.error(f"Error fetching or processing {link}: {e}")
+                            all_results.append({"link": link, "text": None})
+                    else:
+                        all_results.append({"link": None, "text": None})
+                start += len(result_block)
+            except Exception as e:
+                logger.error(f"Error during search: {e}")
+                break
+    logger.debug(f"Search results: {all_results}")
     return all_results
 
 # Function to format the prompt for the language model
 def format_prompt(user_prompt, chat_history):
+    logger.debug(f"Formatting prompt with user prompt: {user_prompt} and chat history: {chat_history}")
     prompt = "<s>"
     for item in chat_history:
         if isinstance(item, tuple):
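One caveat in the new loop: `max_chars_per_page` is read when truncating `visible_text`, but it is neither a parameter of `search()` nor defined in this hunk; unless a module-level constant exists elsewhere in `app.py` (outside this diff), the first successfully fetched page raises a NameError. Scraping Google's `div.g` result blocks is also brittle, since the markup changes and automated queries get throttled. A sketch of a self-contained signature, with the 8000-character default as an assumption:

def search(term, num_results=2, lang="en", timeout=5, safe="active",
           ssl_verify=None, max_chars_per_page=8000):
    # Same request-and-parse loop as in the hunk above; the truncation
    #     visible_text = visible_text[:max_chars_per_page] + "..."
    # then reads a parameter instead of an undefined global.
    ...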
@@ -119,6 +137,7 @@ def format_prompt(user_prompt, chat_history):
         else:
             prompt += f" [Image] "
     prompt += f"[INST] {user_prompt} [/INST]"
+    logger.debug(f"Formatted prompt: {prompt}")
     return prompt
 
 # Function for model inference
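Given the loop above, an empty history yields format_prompt("Hello", []) == "<s>[INST] Hello [/INST]". Those are Mistral/Llama-style tags; Phi-3 was trained on a different chat format (<|user|> ... <|end|> <|assistant|>), so when the local model does load, the tokenizer's own template is the safer way to build prompts. A sketch, assuming `tokenizer` loaded successfully in the try block at the top of the file:

# Build the prompt in whatever chat format the model declares instead of
# hard-coding [INST] tags, which Phi-3 was not trained on.
messages = [{"role": "user", "content": "Hello"}]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)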
@@ -132,7 +151,9 @@ def model_inference(
     repetition_penalty,
     top_p,
 ):
+    logger.debug(f"Starting model inference with user prompt: {user_prompt}, chat history: {chat_history}, web_search: {web_search}")
     if not isinstance(user_prompt, dict):
+        logger.error("Invalid input format. Expected a dictionary.")
         return "Invalid input format. Expected a dictionary."
 
     if "files" not in user_prompt:
@@ -140,6 +161,7 @@ def model_inference(
 
     if not user_prompt["files"]:
         if web_search:
+            logger.debug("Performing web search")
             web_results = search(user_prompt["text"])
             web2 = ' '.join([f"Link: {res['link']}\nText: {res['text']}\n\n" for res in web_results])
             formatted_prompt = format_prompt(f"{user_prompt['text']} [WEB] {web2}", chat_history)
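A practical caveat for this branch: full-page extracts are concatenated straight into the prompt, and a model advertising a 4k context fills that window quickly. A small guard makes the failure mode visible (the threshold and the use of the local tokenizer are assumptions):

# Warn when the web-augmented prompt approaches the 4k-token window the
# model name implies. Assumes the local tokenizer loaded.
prompt_tokens = len(tokenizer.encode(formatted_prompt))
if prompt_tokens > 3584:  # leaves headroom for generation; assumed threshold
    logger.warning(f"Prompt is {prompt_tokens} tokens; may exceed the 4k context")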
@@ -156,6 +178,7 @@ def model_inference(
             response = tokenizer.decode(outputs[0], skip_special_tokens=True)
         else:
             response = client.generate(formatted_prompt)
+        logger.debug(f"Model response: {response}")
         return response
     else:
         formatted_prompt = format_prompt(user_prompt["text"], chat_history)
@@ -172,6 +195,7 @@ def model_inference(
             response = tokenizer.decode(outputs[0], skip_special_tokens=True)
         else:
             response = client.generate(formatted_prompt)
+        logger.debug(f"Model response: {response}")
         return response
     else:
         return "Image input not supported in this implementation."
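The fallback path calls `client.generate(formatted_prompt)`, which is worth flagging: as of current `huggingface_hub` releases, `InferenceClient` exposes no `generate` method, so once the local load fails (as the note on the first hunk argues it will) this line raises AttributeError. The text-generation entry point is `text_generation`. A hedged sketch, reusing the sampling parameters `model_inference` already receives:

response = client.text_generation(
    formatted_prompt,
    max_new_tokens=max_new_tokens,
    temperature=temperature,
    repetition_penalty=repetition_penalty,
    top_p=top_p,
)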
@@ -235,10 +259,11 @@ chatbot = gr.Chatbot(
 
 # Define Gradio interface
 def chat_interface(user_input, history, web_search, decoding_strategy, temperature, max_new_tokens, repetition_penalty, top_p):
-
-
+    logger.debug(f"Chat interface called with user_input: {user_input}")
+    if isinstance(user_input, str):
+        user_input = {"text": user_input, "files": []}
     response = model_inference(
-
+        user_input,
         history,
         web_search,
         decoding_strategy,
@@ -247,7 +272,8 @@ def chat_interface(user_input, history, web_search, decoding_strategy, temperature, max_new_tokens, repetition_penalty, top_p):
         repetition_penalty,
         top_p,
     )
-    history.append((user_input, response))
+    history.append((user_input["text"], response))
+    logger.debug(f"Updated chat history: {history}")
     return history, history
 
 # Create Gradio interface
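With this change the wrapper accepts the plain string Gradio passes, wraps it into the dict `model_inference` expects, and stores only the turn's text in the history. A hypothetical direct call; the widget definitions and the real `decoding_strategy` labels live outside this diff, so every value below is an assumption:

history = []
history, _ = chat_interface(
    "What is the capital of France?",  # wrapped into {"text": ..., "files": []} above
    history,
    False,      # web_search
    "Greedy",   # decoding_strategy (assumed label)
    0.7,        # temperature
    256,        # max_new_tokens
    1.1,        # repetition_penalty
    0.9,        # top_p
)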
@@ -272,4 +298,5 @@ interface = gr.Interface(
 )
 
 if __name__ == "__main__":
+    logger.debug("Launching Gradio interface")
     interface.launch()
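Given the build error this commit was chasing, the durable fix is to move the dependency upgrade out of `app.py` and into the Space's `requirements.txt`, which Spaces installs at build time. A sketch; the package list is inferred from this file's imports, and the version pin is an assumption:

# requirements.txt
torch
transformers>=4.41  # recent enough for Phi-3; exact pin is an assumption
gradio
huggingface_hub
requests
beautifulsoup4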