Spaces:
Sleeping
Sleeping
File size: 974 Bytes
b2aa1c2 1d3c732 e42d9e6 b2aa1c2 d1fe01a bdbfec0 e42d9e6 af2d6cc bdbfec0 e42d9e6 af2d6cc 5d7decc af2d6cc e42d9e6 95d25ad af2d6cc bdbfec0 a517ca7 e42d9e6 af2d6cc 97effd9 6a6b446 b2aa1c2 bdbfec0 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 |
import gradio as gr
import random
from huggingface_hub import InferenceClient
# Hugging Face Inference API client, bound to the Gemma-3 27B instruct model.
client = InferenceClient("google/gemma-3-27b-it")
# To change the LLM ^
def respond(message, history):
    """Stream an assistant reply for *message*, given the prior chat *history*.

    Parameters
    ----------
    message : str
        The latest user message.
    history : list[dict] | None
        Previous turns as ``{"role": ..., "content": ...}`` dicts
        (Gradio "messages" format).

    Yields
    ------
    str
        The assistant response accumulated so far (grows with each chunk,
        which is what gives the streaming effect in the UI).
    """
    # System prompt sets the bot's personality — edit to change it.
    messages = [{"role": "system", "content": "You are a rude British chatbot."}]
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})

    response = ""
    # Loop variable renamed from `message` so it no longer shadows the
    # function parameter of the same name.
    for chunk in client.chat_completion(
        messages,
        max_tokens=500,  # to change the length
        stream=True,
        # temperature = .9,  # randomization
        # top_p = .7
    ):
        token = chunk.choices[0].delta.content
        # delta.content can be None (e.g. on the final streamed chunk);
        # concatenating None would raise TypeError.
        if token:
            response += token
        yield response
    print(response)
# Build the chat UI around `respond`; type="messages" passes history as
# role/content dicts (matching what `respond` expects).
chatbot = gr.ChatInterface(respond, type="messages")  # theme = "shivi/calm_seafoam")
# debug=True keeps the process attached and prints server-side errors.
# (Removed a stray trailing "|" artifact that would be a syntax error.)
chatbot.launch(debug=True)