# llm_chatbot/app.py — added temperature parameter (commit ac329a1, ritikaaA)
import gradio as gr
import random
from huggingface_hub import InferenceClient
# Serverless Inference API client pinned to the Zephyr-7B chat model.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
def respond(message, history):
    """Generate a friendly book-influencer reply to *message*.

    Matches gradio.ChatInterface's fn contract: *message* is the new user
    string; *history* is the prior conversation as a list of
    {"role": ..., "content": ...} dicts (the type="messages" format).
    Returns the assistant's reply text with surrounding whitespace stripped.
    """
    persona = {"role": "system", "content": "You are a book influencer that is nice and friendly."}
    # Full conversation: persona, any prior turns, then the new user message.
    conversation = [persona, *(history or []), {"role": "user", "content": message}]
    # Low temperature + small token cap keep replies short and on-persona.
    reply = client.chat_completion(conversation, max_tokens=100, temperature=.2)
    return reply['choices'][0]['message']['content'].strip()
# Import lines go at the top! Any libraries I need to import go up ^
# Earlier practice bots (Gradio chat fns always take two parameters:
# message and history):
# def magic_eight_ball(message, history):
#     return random.choice(['Possibly', 'Absolutely not', 'If you try', "Why not", 'Yes!'])
# def yes_or_no(message, history):
#     return random.choice(['Yes', 'No'])
# def echo(message, history):
#     return message
print("hello world")
chatbot = gr.ChatInterface(respond, type="messages", title = "LLM Chatbox", theme = "gradio/soft")
# declaring chatbot so that user can interact and see their conversation history and send new messages
chatbot.launch()