KatieKhamarkhanova commited on
Commit
67602dc
·
verified ·
1 Parent(s): 3ebb64e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -15
app.py CHANGED
@@ -3,7 +3,7 @@ from huggingface_hub import InferenceClient
3
  from sentence_transformers import SentenceTransformer
4
  import torch
5
  import numpy as np
6
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
7
 
8
  # Open the ECOsphere.txt file in read mode with UTF-8 encoding
9
  with open("ECOsphere.txt", "r", encoding="utf-8") as file:
@@ -11,23 +11,23 @@ with open("ECOsphere.txt", "r", encoding="utf-8") as file:
11
  ECOsphere_text = file.read()
12
 
13
  def respond(message, history):
14
- top_results = get_top_chunks( message , chunk_embeddings, cleaned_chunks) # Complete this line
15
- # Print the top results
16
- print(top_results)
17
- messages = [{ "role": "system", "content": f"You are a chatbot that encourage people to live more sustainably. Base your response on the following action {top_results}" }]
18
-
 
19
  if history:
20
  messages.extend(history)
21
-
22
  messages.append({"role": "user", "content": message})
23
-
24
  response = client.chat_completion(
25
- messages,
26
- max_tokens = 1000,
27
- temperature = 0.5
 
28
  )
29
- print(response)
30
- return response["choices"][0]["message"]["content"].strip()
31
 
32
  chatbot = gr.ChatInterface(respond, type="messages")
33
  cleaned_chunks = []
@@ -35,8 +35,7 @@ def preprocess_text(text):
35
  # Strip extra whitespace from the beginning and the end of the text
36
  cleaned_text = text.strip()
37
 
38
- # Split the cleaned_text by every newline character (\n)
39
- chunks = cleaned_text.split("\n")
40
 
41
  # Create an empty list to store cleaned chunks
42
  #cleaned_chunks = []
 
3
  from sentence_transformers import SentenceTransformer
4
  import torch
5
  import numpy as np
6
+ client = InferenceClient("Qwen/Qwen2.5-7B-Instruct-1M")
7
 
8
  # Open the ECOsphere.txt file in read mode with UTF-8 encoding
9
  with open("ECOsphere.txt", "r", encoding="utf-8") as file:
 
11
  ECOsphere_text = file.read()
12
 
13
  def respond(message, history):
14
+ top_results = get_top_chunks(message, chunk_embeddings, cleaned_chunks)
15
+ print("Top results:", top_results)
16
+
17
+ messages = [
18
+ {"role": "system", "content": f"You are a chatbot that encourages people to live more sustainably. Base your response on: {top_results}"}
19
+ ]
20
  if history:
21
  messages.extend(history)
 
22
  messages.append({"role": "user", "content": message})
23
+
24
  response = client.chat_completion(
25
+ model="Qwen/Qwen2.5-7B-Instruct-1M",
26
+ messages=messages,
27
+ max_tokens=200,
28
+ temperature=0.5
29
  )
30
+ return response.choices[0].message["content"].strip()
 
31
 
32
  chatbot = gr.ChatInterface(respond, type="messages")
33
  cleaned_chunks = []
 
35
  # Strip extra whitespace from the beginning and the end of the text
36
  cleaned_text = text.strip()
37
 
38
+ chunks = cleaned_text.split('*')
 
39
 
40
  # Create an empty list to store cleaned chunks
41
  #cleaned_chunks = []