Spaces:
Sleeping
Sleeping
File size: 1,182 Bytes
f1455e6 1e3ff24 5d0c508 33917e1 12b0693 33917e1 829b5e8 33917e1 ac329a1 33917e1 a6d784a f1455e6 33917e1 51ae59d f91927e 1e3ff24 f95790d 1e3ff24 ae203c1 f1455e6 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 |
import gradio as gr
import random
from huggingface_hub import InferenceClient
# Hosted-inference client for the Zephyr-7B chat model on the Hugging Face Hub.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
def respond(message, history):
    """Generate one LLM reply for a Gradio ChatInterface turn.

    Parameters:
        message (str): the new user message.
        history (list[dict] | None): prior turns in Gradio "messages"
            format ({"role": ..., "content": ...} dicts).

    Returns:
        str: the assistant's reply with surrounding whitespace stripped.
    """
    # System prompt fixes the bot's persona for every conversation.
    messages = [{"role": "system", "content": "You are a book influencer that is nice and friendly."}]
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})
    # max_tokens caps the reply length; low temperature keeps answers focused.
    response = client.chat_completion(messages, max_tokens=100, temperature=0.2)
    # chat_completion returns a ChatCompletionOutput dataclass; its documented
    # access pattern is attribute-style, not dict subscripting.
    return response.choices[0].message.content.strip()
# import lines go at the top! Any libraries I need to import go up ^
#
# Earlier practice chat handlers, kept for reference. Every Gradio chat
# handler takes the same two parameters: (message, history).
#
# def magic_eight_ball(message, history):
#     return random.choice(['Possibly', 'Absolutely not', 'If you try', "Why not", 'Yes!'])
#
# def yes_or_no(message, history):
#     return random.choice(['Yes', 'No'])
#
# def echo(message, history):
#     return message
# ChatInterface wires respond() to a web chat UI; type="messages" makes the
# history arrive as a list of {"role": ..., "content": ...} dicts, matching
# what respond() forwards to the model.
chatbot = gr.ChatInterface(respond, type="messages", title="LLM Chatbox", theme="gradio/soft")
# Start the Gradio server so the user can chat with the model.
chatbot.launch()