Spaces:
Sleeping
Sleeping
File size: 1,389 Bytes
ec27854 06eaf0e 8510ce1 ec27854 6b2c366 8510ce1 4aab119 8510ce1 280cb6e 8510ce1 08234af f040a8b 08234af 9df5ada f040a8b 08234af 9df5ada 08234af 8510ce1 ec27854 8515892 8510ce1 8515892 8510ce1 4aab119 ff539da 4aab119 bee2caa 943436b ff539da 4d2a151 00625c5 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 |
import gradio as gr
import random
from huggingface_hub import InferenceClient
client = InferenceClient("google/gemma-3-27b-it") # hosted inference backend; change this model id to swap the LLM (e.g. "HuggingFaceH4/zephyr-7b-beta")
def respond_miles(message, history):
    """Stream a chat reply in the persona of "Miles".

    Args:
        message: The user's latest message (plain string).
        history: Prior turns in OpenAI-style ``{"role", "content"}`` dicts,
            as supplied by ``gr.ChatInterface(type="messages")``; may be empty.

    Yields:
        The reply accumulated so far, one growing string per streamed token,
        which lets the Gradio UI render the answer incrementally.
    """
    # The system prompt's "content" is where the bot's personality is defined.
    messages = [{"role": "system", "content": "Your name is Miles. You are the goofy nerd in highschool"}]
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})

    response = ""
    # NOTE: loop variable renamed from `message` — the original shadowed the
    # user-message parameter, which invited subtle bugs.
    for chunk in client.chat_completion(
        messages,
        max_tokens=130,  # caps the reply length
        stream=True,
    ):
        token = chunk.choices[0].delta.content
        # Some stream chunks (e.g. role-only or final deltas) carry no
        # content; skipping them avoids `str + None` TypeErrors.
        if token:
            response += token
            yield response
    # Removed: the original tail indexed the *string* `response` as a dict
    # (`response["choices"][0]...`), which would raise TypeError, and a
    # `return value` inside a generator never reaches the caller anyway.
def random_response(message, history):
    """Ignore the conversation and answer with a canned word picked at random.

    Args:
        message: The user's message (unused).
        history: The chat history (unused).

    Returns:
        Either "absolutely" or "okay", chosen uniformly at random.
    """
    replies = ["absolutely", "okay"]
    return random.choice(replies)
# Removed two leftover debug statements:
#   random.choice(responses)  -> NameError: `responses` is never defined,
#                                so this line crashed the script on startup.
#   print(random_response)    -> printed the function object itself, not a
#                                reply; calling it was never the intent here.
# Build the chat UI around the streaming Miles persona and start the server.
green_theme = gr.themes.Ocean(
    primary_hue="green",
    secondary_hue="green",
    neutral_hue="green",
)
chatbot = gr.ChatInterface(
    fn=respond_miles,
    type="messages",
    title="Meet Miles!",
    description="That nerdy boy from high school.",
    theme=green_theme,
)
chatbot.launch(debug=True)
|