Sheepbon committed on
Commit
ed4bb55
·
verified ·
1 Parent(s): 4c36b4a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -15
app.py CHANGED
@@ -1,11 +1,12 @@
1
  from huggingface_hub import InferenceClient
 
2
  #STEP1FROMSEMANTICSEARCH (import libraries)
3
  from sentence_transformers import SentenceTransformer
4
  import torch
5
  import gradio as gr
6
  import random
7
 
8
- client=InferenceClient("Qwen/Qwen2.5-72B-Instruct")
9
  #deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
10
 
11
  # Open the water_cycle.txt file in read mode with UTF-8 encoding - step 2 from semantic search
@@ -46,27 +47,17 @@ def preprocess_text(text):
46
  cleaned_chunks = preprocess_text(recipes_text) # Complete this line
47
 
48
  def respond(message, history):
49
- #responses = ["Yes", "No"]
50
- #return random.choice(responses)
51
-
52
- messages = [
53
- {"role":"system",
54
- "content": "You are a chatbot that is very sweet and kind"
55
- }
56
- ]
57
-
58
 
59
  if history:
60
  messages.extend(history)
61
 
62
- messages.append(
63
- {"role":"user",
64
- "content": "message"}
65
- )
66
 
67
- response = client.chat_completion(messages, max_tokens=100, temperature=1.3, top_p=.2)
68
  #temperature and top_p control randomness
69
 
 
70
 
71
  return response['choices'][0]['message']['content'].strip()
72
 
 
1
  from huggingface_hub import InferenceClient
2
+
3
  #STEP1FROMSEMANTICSEARCH (import libraries)
4
  from sentence_transformers import SentenceTransformer
5
  import torch
6
  import gradio as gr
7
  import random
8
 
9
+ client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")
10
  #deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
11
 
12
  # Open the water_cycle.txt file in read mode with UTF-8 encoding - step 2 from semantic search
 
47
  cleaned_chunks = preprocess_text(recipes_text) # Complete this line
48
 
49
def respond(message, history):
    """Chat callback for gradio: build a prompt and query the hosted model.

    Parameters:
        message: str — the latest user message.
        history: list | None — prior turns; appended to the prompt verbatim.
            NOTE(review): assumes entries already use the
            {"role": ..., "content": ...} messages schema — confirm the
            ChatInterface is created with type="messages".

    Returns:
        str — the assistant's reply text with surrounding whitespace stripped.
    """
    # Fixed persona prompt seeds every conversation.
    messages = [{"role": "system",
                 "content": "You are a chatbot that is very sweet and kind"}]

    # Replay earlier turns so the model sees the full conversation.
    if history:
        messages.extend(history)

    messages.append({"role": "user", "content": message})

    # temperature and top_p control randomness; the low top_p keeps
    # replies focused despite the high temperature.
    # (Removed the leftover debug `print(response)` — it dumped the raw
    # API response object to stdout on every turn.)
    response = client.chat_completion(
        messages, max_tokens=100, temperature=1.3, top_p=.3
    )

    return response['choices'][0]['message']['content'].strip()
63