# fatimatesting / app.py — Hugging Face Space by fjassim (commit dd43894, verified)
import gradio as gr
from huggingface_hub import InferenceClient
# Hugging Face Inference API client for the chat model that powers the bot.
client = InferenceClient("Qwen/Qwen2.5-7B-Instruct")
# NOTE(review): this module-level `response` is never read anywhere visible —
# respond() builds its own local accumulator. Looks like dead code; confirm and remove.
response = ""
# load climate information on Chicago to instruct chatbot
def load_weather_info(path="chicago_weather.txt"):
    """Return the Chicago climate reference text used to ground the chatbot.

    Parameters:
        path: file to read; defaults to the bundled ``chicago_weather.txt``
            so existing callers are unaffected.

    Returns:
        The file's full contents as a UTF-8 decoded string.

    Raises:
        FileNotFoundError: if the file is missing from the Space.
    """
    with open(path, "r", encoding="utf-8") as file:
        return file.read()
# load aesthetics info on different styles to instruct chatbot on what styles to recommend and what theyre comprised of
def load_aesthetics_info(path="clothing_aesthetics_breakdown.txt"):
    """Return the clothing-aesthetics reference text used to ground the chatbot.

    Describes the styles the bot may recommend and what each is comprised of.

    Parameters:
        path: file to read; defaults to the bundled
            ``clothing_aesthetics_breakdown.txt`` so existing callers are unaffected.

    Returns:
        The file's full contents as a UTF-8 decoded string.

    Raises:
        FileNotFoundError: if the file is missing from the Space.
    """
    with open(path, "r", encoding="utf-8") as file:
        return file.read()
# function where chatbot, whose role is outfit picker, generates response in a visual stream
def respond(message, history):
    """Stream an outfit-recommendation reply for the Gradio ChatInterface.

    Parameters:
        message: the user's latest chat message.
        history: prior chat turns, or falsy when the chat is new.
            NOTE(review): it is extended directly into the OpenAI-style
            ``messages`` list, which assumes Gradio supplies
            ``{"role", "content"}`` dicts (``type="messages"``) — confirm
            against the installed Gradio version's default history format.

    Yields:
        The assistant reply accumulated so far, so Gradio renders it as a
        token-by-token stream.
    """
    # Ground the system prompt in the two local knowledge files.
    weather = load_weather_info()
    aesthetics = load_aesthetics_info()
    # System prompt assigns the outfit-picker persona and injects both
    # knowledge texts verbatim.
    messages = [{"role": "system",
                 "content": f"You are an expert in picking out awesome cute outfits based on the weather in Chicago using this info: {weather} and based on aesthetics styles using this info: {aesthetics}. If the user asks for an outfit, give a top, bottom, and shoe recommendation with details about color, material, and length based on the current weather. Ask them about their preferred style to collect knowledge on what to recommend. Keep the response to 250 words while being friendly and informal!"}]
    # Carry earlier turns forward so the model keeps conversational context.
    if history:
        messages.extend(history)
    messages.append({"role": "user", "content": message})
    # Stream the completion; 350-token cap, temperature 0.6 to balance
    # creativity and directness.
    stream = client.chat_completion(
        messages,
        max_tokens=350,
        temperature=0.6,
        stream=True
    )
    # Accumulate deltas and re-yield the growing reply for live rendering.
    partial = ""
    for chunk in stream:  # renamed from `message` to avoid shadowing the parameter
        token = chunk.choices[0].delta.content
        # Fix: the first/last stream chunks can carry a None delta content;
        # the original `responses += token` raised TypeError on those.
        if token:
            partial += token
            yield partial
# Wire the streaming responder into a Gradio chat UI and start the server.
chatbot = gr.ChatInterface(respond)
# debug=True keeps the launch blocking and surfaces errors in the Space logs.
chatbot.launch(debug=True)