mmargg committed on
Commit
244b6bb
·
verified ·
1 Parent(s): 6eecb3a
Files changed (1) hide show
  1. app.py +2 -3
app.py CHANGED
@@ -121,7 +121,6 @@ cleaned_chunks = preprocess_text(academic_tips_text)
121
  chunk_embeddings = create_embeddings(cleaned_chunks)
122
  #AI API being used
123
  client= InferenceClient("openai/gpt-oss-20b")
124
- response=""
125
  #defining role of AI and user
126
 
127
  def respond(message,history):
@@ -131,8 +130,8 @@ def respond(message,history):
131
 
132
  if history:
133
  messages.extend(history) #keep adding history
134
-
135
- messages.append({"role":"user", "content": message})
136
 
137
  response=client.chat_completion(messages, max_tokens=100) #capping how many words the LLM is allowed to generate as a respond (100 words)
138
 
 
121
  chunk_embeddings = create_embeddings(cleaned_chunks)
122
  #AI API being used
123
  client= InferenceClient("openai/gpt-oss-20b")
 
124
  #defining role of AI and user
125
 
126
  def respond(message,history):
 
130
 
131
  if history:
132
  messages.extend(history) #keep adding history
133
+
134
+ messages.append({"role":"user","content": message})
135
 
136
  response=client.chat_completion(messages, max_tokens=100) #capping how many words the LLM is allowed to generate as a respond (100 words)
137