Update app.py
app.py CHANGED
@@ -6,13 +6,22 @@ client = InferenceClient("google/gemma-3-27b-it") #change the LLM "HuggingFaceH4
 
 def respond(message, history):
 
-    messages = [{"role": "system", "content": "You are
+    messages = [{"role": "system", "content": "You are the goofy nerd in highschool"}] # "content" is where u can change the personality
     if history:
         messages.extend(history)
 
     messages.append({"role" : "user", "content" : message})
-
-    response =
+
+    response = ""
+    for messages in client.chat_completion(
+        messages,
+        max_tokens = 130,
+        stream=True,
+    ):
+        token = messages.choices[0].delta.content
+        response += token
+        yield response
+    #max_tokens is length
 
     print(response["choices"][0]["message"]["content"].strip())
 
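The added loop runs, but it has a few rough edges: the loop variable reuses the name `messages` (shadowing the request payload after the first chunk), `delta.content` can be `None` on some stream chunks (which would make `response += token` raise a `TypeError`), and the leftover `print(response["choices"]...)` line indexes what is now a plain string. A minimal cleaned-up sketch, assuming `huggingface_hub`'s `InferenceClient.chat_completion` streaming API and the same model as above:

# Cleaned-up sketch of the streaming respond(); assumes huggingface_hub's
# InferenceClient (as imported at the top of app.py) and the same model.
from huggingface_hub import InferenceClient

client = InferenceClient("google/gemma-3-27b-it")

def respond(message, history):
    # The system prompt sets the persona; edit "content" to change the personality.
    messages = [{"role": "system", "content": "You are the goofy nerd in highschool"}]
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})

    response = ""
    # Use a separate loop variable so the request payload `messages`
    # is not shadowed by each stream chunk.
    for chunk in client.chat_completion(
        messages,
        max_tokens=130,  # max_tokens caps the reply length
        stream=True,
    ):
        token = chunk.choices[0].delta.content
        if token:  # some chunks carry no text (e.g. role-only deltas)
            response += token
            yield response  # yield the partial reply so the UI can stream it

If app.py wires respond into Gradio (a guess based on the respond(message, history) generator signature, which is the usual pattern for Hugging Face chat Spaces), something like gr.ChatInterface(respond, type="messages").launch() would stream each partial response into the chat window.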