ewingreen committed on
Commit
b5287f9
·
verified ·
1 Parent(s): bc8279c

Update with new model google/gemma-2-2b-it

Browse files
Files changed (1) hide show
  1. app.py +5 -4
app.py CHANGED
@@ -5,7 +5,7 @@ import torch
5
  import numpy as np
6
 
7
  # this client will handle making requests to the model to generate responses
8
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
9
 
10
  # Load and process the knowledge base text file
11
  with open("knowledge.txt", "r", encoding="utf-8") as f:
@@ -52,11 +52,12 @@ def respond(message, history):
52
  messages = [{"role": "system",
53
  "content": system_message}]
54
 
55
- # add all previous messages to the messages list
56
  if history:
57
- messages.extend(history)
 
 
58
 
59
- # add the current users message to the messages list
60
  messages.append({"role": "user", "content": message})
61
 
62
  # makes the chat completion API call,
 
5
  import numpy as np
6
 
7
  # this client will handle making requests to the model to generate responses
8
+ client = InferenceClient("google/gemma-2-2b-it")
9
 
10
  # Load and process the knowledge base text file
11
  with open("knowledge.txt", "r", encoding="utf-8") as f:
 
52
  messages = [{"role": "system",
53
  "content": system_message}]
54
 
 
55
  if history:
56
+ for user_msg, assistant_msg in history:
57
+ messages.append({"role": "user", "content": user_msg})
58
+ messages.append({"role": "assistant", "content": assistant_msg})
59
 
60
+ # add the current user's message to the messages list
61
  messages.append({"role": "user", "content": message})
62
 
63
  # makes the chat completion API call,