Update app.py
Browse files
app.py
CHANGED
|
@@ -157,7 +157,7 @@ def normalize_text(text):
|
|
| 157 |
words = word_tokenize(text)
|
| 158 |
return ' '.join(words)
|
| 159 |
|
| 160 |
-
llm = HuggingFaceEndpoint(
|
| 161 |
repo_id="HuggingFaceH4/starchat2-15b-v0.1",
|
| 162 |
task="text-generation",
|
| 163 |
max_new_tokens=4096,
|
|
@@ -167,7 +167,7 @@ llm = HuggingFaceEndpoint(
|
|
| 167 |
repetition_penalty=1.2,
|
| 168 |
do_sample=True,
|
| 169 |
)
|
| 170 |
-
chat_model = ChatHuggingFace(llm=llm)
|
| 171 |
|
| 172 |
model_name = "sentence-transformers/all-mpnet-base-v2"
|
| 173 |
embedding_llm = SentenceTransformerEmbeddings(model_name=model_name)
|
|
@@ -208,13 +208,13 @@ def Chat_Message(history):
|
|
| 208 |
message=HumanMessage(content=history[-1][0])
|
| 209 |
messages1.append(message)
|
| 210 |
response = chat_model.invoke(messages1)
|
| 211 |
-
messages1.append(AIMessage(content=response.content))
|
| 212 |
|
| 213 |
if len(messages1) >= 8:
|
| 214 |
messages1 = messages1[-8:]
|
| 215 |
|
| 216 |
history[-1][1] = ""
|
| 217 |
-
for character in response.content:
|
| 218 |
history[-1][1] += character
|
| 219 |
time.sleep(0.0025)
|
| 220 |
yield history
|
|
@@ -243,13 +243,13 @@ def Web_Search(history):
|
|
| 243 |
msg=HumanMessage(content=augmented_prompt)
|
| 244 |
messages2.append(msg)
|
| 245 |
response = chat_model.invoke(messages2)
|
| 246 |
-
messages2.append(AIMessage(content=response.content))
|
| 247 |
|
| 248 |
if len(messages2) >= 8:
|
| 249 |
messages2 = messages2[-8:]
|
| 250 |
|
| 251 |
history[-1][1] = ""
|
| 252 |
-
for character in response.content:
|
| 253 |
history[-1][1] += character
|
| 254 |
time.sleep(0.0025)
|
| 255 |
yield history
|
|
@@ -271,12 +271,12 @@ def Chart_Generator(history):
|
|
| 271 |
messages3.append(prompt)
|
| 272 |
|
| 273 |
response = chat_model.invoke(messages3)
|
| 274 |
-
messages3.append(AIMessage(content=response.content))
|
| 275 |
|
| 276 |
if len(messages3) >= 8:
|
| 277 |
messages3 = messages3[-8:]
|
| 278 |
|
| 279 |
-
combined_content = f'{image_html}<br>{response.content}'
|
| 280 |
else:
|
| 281 |
response_text = "Can't generate this image. Please provide valid chart details."
|
| 282 |
combined_content = response_text
|
|
@@ -285,12 +285,12 @@ def Chart_Generator(history):
|
|
| 285 |
messages3.append(prompt)
|
| 286 |
|
| 287 |
response = chat_model.invoke(messages3)
|
| 288 |
-
messages3.append(AIMessage(content=response.content))
|
| 289 |
|
| 290 |
if len(messages3) >= 8:
|
| 291 |
messages3 = messages3[-8:]
|
| 292 |
|
| 293 |
-
combined_content=response.content
|
| 294 |
|
| 295 |
history[-1][1] = ""
|
| 296 |
for character in combined_content:
|
|
@@ -333,12 +333,12 @@ def Link_Scratch(history):
|
|
| 333 |
message = HumanMessage(content=augmented_prompt)
|
| 334 |
messages4.append(message)
|
| 335 |
response = chat_model.invoke(messages4)
|
| 336 |
-
messages4.append(AIMessage(content=response.content))
|
| 337 |
|
| 338 |
if len(messages4) >= 1:
|
| 339 |
messages4 = messages4[-1:]
|
| 340 |
|
| 341 |
-
response_message = response.content
|
| 342 |
|
| 343 |
history[-1][1] = ""
|
| 344 |
for character in response_message:
|
|
@@ -391,12 +391,12 @@ def File_Interact(history,filepath):
|
|
| 391 |
message = HumanMessage(content=augmented_prompt)
|
| 392 |
messages5.append(message)
|
| 393 |
response = chat_model.invoke(messages5)
|
| 394 |
-
messages5.append(AIMessage(content=response.content))
|
| 395 |
|
| 396 |
if len(messages5) >= 1:
|
| 397 |
messages5 = messages5[-1:]
|
| 398 |
|
| 399 |
-
response_message = response.content
|
| 400 |
|
| 401 |
history[-1][1] = ""
|
| 402 |
for character in response_message:
|
|
@@ -510,4 +510,4 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
|
| 510 |
chatbot.like(print_like_dislike, None, None)
|
| 511 |
|
| 512 |
demo.queue(max_size=10, default_concurrency_limit=4)
|
| 513 |
-
demo.launch(max_file_size="5mb",show_api=False,max_threads=50)
|
|
|
|
| 157 |
words = word_tokenize(text)
|
| 158 |
return ' '.join(words)
|
| 159 |
|
| 160 |
+
chat_model = HuggingFaceEndpoint(
|
| 161 |
repo_id="HuggingFaceH4/starchat2-15b-v0.1",
|
| 162 |
task="text-generation",
|
| 163 |
max_new_tokens=4096,
|
|
|
|
| 167 |
repetition_penalty=1.2,
|
| 168 |
do_sample=True,
|
| 169 |
)
|
| 170 |
+
# chat_model = ChatHuggingFace(llm=llm)
|
| 171 |
|
| 172 |
model_name = "sentence-transformers/all-mpnet-base-v2"
|
| 173 |
embedding_llm = SentenceTransformerEmbeddings(model_name=model_name)
|
|
|
|
| 208 |
message=HumanMessage(content=history[-1][0])
|
| 209 |
messages1.append(message)
|
| 210 |
response = chat_model.invoke(messages1)
|
| 211 |
+
messages1.append(AIMessage(content=response))
|
| 212 |
|
| 213 |
if len(messages1) >= 8:
|
| 214 |
messages1 = messages1[-8:]
|
| 215 |
|
| 216 |
history[-1][1] = ""
|
| 217 |
+
for character in response:
|
| 218 |
history[-1][1] += character
|
| 219 |
time.sleep(0.0025)
|
| 220 |
yield history
|
|
|
|
| 243 |
msg=HumanMessage(content=augmented_prompt)
|
| 244 |
messages2.append(msg)
|
| 245 |
response = chat_model.invoke(messages2)
|
| 246 |
+
messages2.append(AIMessage(content=response))
|
| 247 |
|
| 248 |
if len(messages2) >= 8:
|
| 249 |
messages2 = messages2[-8:]
|
| 250 |
|
| 251 |
history[-1][1] = ""
|
| 252 |
+
for character in response:
|
| 253 |
history[-1][1] += character
|
| 254 |
time.sleep(0.0025)
|
| 255 |
yield history
|
|
|
|
| 271 |
messages3.append(prompt)
|
| 272 |
|
| 273 |
response = chat_model.invoke(messages3)
|
| 274 |
+
messages3.append(AIMessage(content=response))
|
| 275 |
|
| 276 |
if len(messages3) >= 8:
|
| 277 |
messages3 = messages3[-8:]
|
| 278 |
|
| 279 |
+
combined_content = f'{image_html}<br>{response}'
|
| 280 |
else:
|
| 281 |
response_text = "Can't generate this image. Please provide valid chart details."
|
| 282 |
combined_content = response_text
|
|
|
|
| 285 |
messages3.append(prompt)
|
| 286 |
|
| 287 |
response = chat_model.invoke(messages3)
|
| 288 |
+
messages3.append(AIMessage(content=response))
|
| 289 |
|
| 290 |
if len(messages3) >= 8:
|
| 291 |
messages3 = messages3[-8:]
|
| 292 |
|
| 293 |
+
combined_content=response
|
| 294 |
|
| 295 |
history[-1][1] = ""
|
| 296 |
for character in combined_content:
|
|
|
|
| 333 |
message = HumanMessage(content=augmented_prompt)
|
| 334 |
messages4.append(message)
|
| 335 |
response = chat_model.invoke(messages4)
|
| 336 |
+
messages4.append(AIMessage(content=response))
|
| 337 |
|
| 338 |
if len(messages4) >= 1:
|
| 339 |
messages4 = messages4[-1:]
|
| 340 |
|
| 341 |
+
response_message = response
|
| 342 |
|
| 343 |
history[-1][1] = ""
|
| 344 |
for character in response_message:
|
|
|
|
| 391 |
message = HumanMessage(content=augmented_prompt)
|
| 392 |
messages5.append(message)
|
| 393 |
response = chat_model.invoke(messages5)
|
| 394 |
+
messages5.append(AIMessage(content=response))
|
| 395 |
|
| 396 |
if len(messages5) >= 1:
|
| 397 |
messages5 = messages5[-1:]
|
| 398 |
|
| 399 |
+
response_message = response
|
| 400 |
|
| 401 |
history[-1][1] = ""
|
| 402 |
for character in response_message:
|
|
|
|
| 510 |
chatbot.like(print_like_dislike, None, None)
|
| 511 |
|
| 512 |
demo.queue(max_size=10, default_concurrency_limit=4)
|
| 513 |
+
demo.launch(max_file_size="5mb", show_api=False, max_threads=50)
|