First iteration — basic chatbot with a gift-giving personality
Browse files
app.py
CHANGED
|
@@ -2,29 +2,40 @@ import gradio as gr
|
|
| 2 |
import random
|
| 3 |
from huggingface_hub import InferenceClient
|
| 4 |
|
| 5 |
-
client = InferenceClient(
|
| 6 |
-
|
| 7 |
-
def
|
| 8 |
-
|
| 9 |
-
yes_or_no = random.choice(choices)
|
| 10 |
-
return yes_or_no
|
| 11 |
-
|
| 12 |
-
def respond(message, history):
|
| 13 |
-
messages = [{"role": "system", "content": "You are a sassy chatbot from the 1800s."}]
|
| 14 |
if history:
|
| 15 |
messages.extend(history)
|
|
|
|
| 16 |
messages.append({"role": "user", "content": message})
|
| 17 |
-
|
| 18 |
response = ""
|
| 19 |
for message in client.chat_completion(
|
|
|
|
| 20 |
messages,
|
| 21 |
-
max_tokens =
|
| 22 |
stream = True,
|
|
|
|
|
|
|
|
|
|
| 23 |
):
|
| 24 |
token = message.choices[0].delta.content
|
| 25 |
response += token
|
| 26 |
yield response
|
|
|
|
| 27 |
|
| 28 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 29 |
|
| 30 |
-
chatbot.launch()
|
|
|
|
| 2 |
import random
|
| 3 |
from huggingface_hub import InferenceClient
|
| 4 |
|
| 5 |
+
# Shared inference backend for the chat functions below.
client = InferenceClient('Qwen/Qwen2.5-72B-Instruct')
# Change the model ID passed to InferenceClient to swap the LLM used by respond().
|
| 7 |
+
def respond(message, history):
    """Stream a gift-idea reply for *message* given the prior chat *history*.

    Parameters:
        message: the user's latest message (str).
        history: prior turns as a list of {'role': ..., 'content': ...}
            dicts (Gradio 'messages' format — presumably; confirm against
            the ChatInterface configuration), or a falsy value on the
            first turn.

    Yields:
        The accumulated partial response string after each streamed token,
        so the UI can render the reply incrementally.
    """
    messages = [{'role': 'system', 'content': 'You give really good gift ideas and are super helpful! You also tell me the price of each item.'}]
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})

    response = ""
    # NOTE: loop variable renamed from `message` — the original shadowed the
    # function parameter of the same name.
    for chunk in client.chat_completion(
        messages,
        max_tokens=1000,  # caps the length of the model's reply
        stream=True,
        # temperature = 0.8,  # decimal between 0-2
        # top_p = .65  # decimal between 0-1
    ):
        token = chunk.choices[0].delta.content
        # Streamed chunks (typically the final one) can carry a None delta;
        # skip those so `response += token` never raises TypeError.
        if token:
            response += token
        yield response
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def echo(message, history):
    """Magic-8-ball style responder: ignores the input entirely and returns
    one random canned answer.

    The (message, history) signature matches the Gradio chat-fn contract so
    this can be dropped in as an alternative to respond().

    Returns:
        A randomly chosen answer string.
    """
    # Bug fix: the original list was missing a comma after 'My reply is no',
    # so Python's implicit string concatenation fused it with 'No.' into the
    # single (unintended) choice 'My reply is noNo.'.
    choices = [
        'It is certain', 'It is decidedly so', 'Without a doubt',
        'Yes-definitely', 'You may rely on it', 'As I see it, yes',
        'Most Likely', 'Outlook good.', 'Yes', 'Signs point to yes',
        'Dont count on it', 'My reply is no', 'No.', 'very doubtful',
        'ask again', 'cannot predict now', 'better not tell you now',
    ]
    return random.choice(choices)
|
| 39 |
+
# Wire the streaming respond() generator into a Gradio chat UI.
# type='messages' keeps the history in role/content dict form, which is the
# shape respond() extends into its message list.
chatbot = gr.ChatInterface(respond, type='messages')
# debug=True surfaces tracebacks in the console while iterating on the Space.
chatbot.launch(debug=True)
|