# NOTE(review): the lines "Spaces: / Sleeping / Sleeping" are Hugging Face
# Spaces page-scrape residue, not program text; kept only as this comment.
| import gradio as gr | |
| from sentence_transformers import SentenceTransformer, util | |
| import openai | |
| import os | |
| import matplotlib.pyplot as plt | |
| from matplotlib import font_manager | |
| from PIL import Image | |
os.environ["TOKENIZERS_PARALLELISM"] = "false"  # silence tokenizers fork/parallelism warning

# Paths and model identifiers, grouped here for easy configuration and maintenance.
filename = "output_topic_details.txt"  # knowledge base: one game-detail segment per line
retrieval_model_name = 'output/sentence-transformer-finetuned/'

# Fails fast with KeyError if the API key is not configured in the environment.
openai.api_key = os.environ["OPENAI_API_KEY"]

# System prompt controlling the assistant's persona and answer format.
# (Fixed typo: "recommedation" -> "recommendation".)
system_message = "You are a video game recommendation chatbot. You respond to requests in a friendly manner, with the name, price, release date, description and website of a game without bolding and bullet points"

# Conversation history seeded with the system prompt; generate_response appends to it.
messages = [{"role": "system", "content": system_message}]

# Attempt to load the retrieval model and report success or failure on the console.
try:
    retrieval_model = SentenceTransformer(retrieval_model_name)
    print("Models loaded successfully.")
except Exception as e:
    print(f"Failed to load models: {e}")
def load_and_preprocess_text(filename):
    """
    Load text from *filename* and return a list of non-empty, stripped lines.

    Returns an empty list (after printing a diagnostic) when the file cannot
    be read or decoded, so callers can continue with an empty knowledge base.
    """
    try:
        with open(filename, 'r', encoding='utf-8') as file:
            segments = [line.strip() for line in file if line.strip()]
        print("Text loaded and preprocessed successfully.")
        return segments
    except (OSError, UnicodeError) as e:
        # Narrowed from a bare `except Exception`: only I/O and decoding
        # failures are expected here; anything else should surface as a bug.
        print(f"Failed to load or preprocess text: {e}")
        return []
# Module-level knowledge base: each element is one stripped line from the details file.
segments = load_and_preprocess_text(filename)
def find_relevant_segment(user_query, segments):
    """
    Return the segment most similar to *user_query* by cosine similarity of
    sentence embeddings, or "" when there are no segments or an error occurs.
    """
    if not segments:
        # Guard: with no segments there is nothing to rank, and argmax over
        # an empty similarity row would raise.
        return ""
    try:
        # Lowercase the query for better matching.
        lower_query = user_query.lower()
        # Encode the query and all candidate segments.
        query_embedding = retrieval_model.encode(lower_query)
        segment_embeddings = retrieval_model.encode(segments)
        # One row of cosine similarities: query vs. every segment.
        similarities = util.pytorch_cos_sim(query_embedding, segment_embeddings)[0]
        # int() unwraps the 0-dim tensor argmax returns before list indexing.
        best_idx = int(similarities.argmax())
        return segments[best_idx]
    except Exception as e:
        # Best-effort fallback: "" lets the caller show a "refine your
        # question" message instead of crashing the UI.
        print(f"Error in finding relevant segment: {e}")
        return ""
def generate_response(user_query, relevant_segment):
    """
    Ask the OpenAI chat model for a recommendation built around the retrieved
    game details and return the assistant's reply text.

    Both the constructed user message and the model's reply are appended to
    the module-level ``messages`` history so later turns keep context. On any
    API failure the error text is returned to the caller (shown in the UI).
    """
    try:
        prompt = f"Here's the information on this game: {relevant_segment}"
        # Record the user turn before calling the API.
        messages.append({"role": "user", "content": prompt})
        completion = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=messages,
            max_tokens=400,
            temperature=0.5,
            top_p=1,
            frequency_penalty=0.5,
            presence_penalty=0.5
        )
        # Pull out the reply text and remember it for conversational context.
        reply = completion['choices'][0]['message']['content'].strip()
        messages.append({"role": "assistant", "content": reply})
        return reply
    except Exception as e:
        print(f"Error in generating response: {e}")
        return f"Error in generating response: {e}"
def query_model(question):
    """
    Answer *question*: retrieve the most relevant segment, generate a chat
    response, and look up a matching image.

    Returns a (text, image) pair in every branch. Bug fix: the early-return
    branches previously returned a single string, which breaks the Gradio
    click handler bound to two outputs ([answer, image_output]); they now
    pair the message with a None image.
    """
    if question == "":
        return "Welcome to Plai! Ask me for any game recommendations.", None
    relevant_segment = find_relevant_segment(question, segments)
    if not relevant_segment:
        return "Could not find specific information. Please refine your question.", None
    response = generate_response(question, relevant_segment)
    image = get_image_for_response(question)
    return response, image
# Directory holding per-question artwork plus the fallback image.
IMAGE_DIRECTORY = "Images"

def get_image_for_response(question):
    """
    Map *question* to an image file inside IMAGE_DIRECTORY.

    The candidate filename is the lowercased question with spaces replaced
    by underscores plus a ".jpg" suffix; when no such file exists, a generic
    placeholder image is opened instead.
    """
    slug = question.lower().replace(" ", "_")
    candidate = os.path.join(IMAGE_DIRECTORY, slug + ".jpg")
    # Debug output kept in place to trace which file is being looked up.
    print(question)
    print(candidate)
    if not os.path.exists(candidate):
        # Fall back to the default aesthetic image.
        candidate = os.path.join(IMAGE_DIRECTORY, "Game Aesthetic.jpeg")
    return Image.open(candidate)
# Define the welcome message and the topics the chatbot can cover.
# Both are raw HTML spans rendered through gr.Markdown; the decorative
# unicode/emoji characters are intentional styling and must stay as-is.
welcome_message = """
<span style="color:#FFF4EA; font-size:90px; font-weight:bold;">˚˖𓍢ִ໋🌷͙֒✧ Welcome to Plai!͙֒˚.🎀༘⋆ .</span>
<span style="color:#fc6c85; font-size:45px; font-weight:bold;">༘˚⋆𐙚。‧𖦹.✧♡˚ৎ୭🩰.𓍢✧˚.💮ִPlai Your Way🌷✩°𓏲🍥⋆.*₊。⋆𖧧.࣪˚⊹₊ᰔ</span>
<span style="color:#F7879A; font-size:40px; font-weight:bold;">🫧𓍢ִ໋🍬Your AI-Driven Assistant for All Videogame Related Queries˚˖𓍢ִ໋🦢</span>
<span style="color:#E75480; font-size:27px; font-weight:bold;">°❀⋆.♡𓍢Created by Perennial, Jiya, and Ly-Ly of the 2024 Kode With Klossy San Francisco Campೃ࿔*˚⊹:・</span>
<span style="color:#AB4E68; font-size:25px; font-weight:bold;">𓍢ִ໋🌷͙֒₊˚*Feel Free to ask for Recommendations Based on the Topics Belowੈ🎀⸝⸝🍓⋆</span>
"""
# Topic list shown in the left column of the UI.
topics = """
<span style="color:#A25F9D; font-size:20px; font-weight:light;">🎀୭✧Genre🧷˚.₊</span>
<span style="color:#A25F9D; font-size:20px; font-weight:light;">₊˚˖𓍢ִ🍓✧Price˚🎀༘⋆゚</span>
<span style="color:#A25F9D; font-size:20px; font-weight:light;">📍ִ໋🌷͙֒✧Style🎀༘🩷˚.⋆</span>
<span style="color:#A25F9D; font-size:20px; font-weight:light;">🍰🎀♡Feeling*.゚🧸</span>
<span style="color:#A25F9D; font-size:20px; font-weight:light;">₊˚🦢✩Year🎀⊹☁️♡゚</span>
<span style="color:#A25F9D; font-size:20px; font-weight:light;">⋆。‧˚ʚ꣑ৎɞ˚‧。⋆</span>
"""
# Custom pink Gradio theme built on the Base theme; every value is a hex color.
# (Comments corrected: #AB4E68 is a deep pink, not orange as previously noted.)
theme = gr.themes.Base().set(
    background_fill_primary='#FAB9CB',  # Light pink page background (light mode)
    background_fill_primary_dark='#AB4E68',  # Deep pink page background (dark mode)
    background_fill_secondary='#AB4E68',  # Deep pink secondary fill
    background_fill_secondary_dark='#AB4E68',  # Deep pink secondary fill (dark mode)
    border_color_accent='#FAB9CB',  # Accent border color
    border_color_accent_dark='#AB4E68',  # Dark accent border color
    border_color_accent_subdued='#AB4E68',  # Subdued accent border color
    border_color_primary='#AB4E68',  # Primary border color
    block_border_color='#FAB9CB',  # Block border color
    button_primary_background_fill='#AB4E68',  # Primary button background color
    button_primary_background_fill_dark='#AB4E68',  # Dark primary button background color
)
# Set up the Gradio Blocks interface: banner image, welcome text, topic list,
# then the question/answer boxes and image output wired to query_model.
with gr.Blocks(theme=theme) as demo:
    # Decorative banner at the top of the page.
    gr.Image("Video Game Banner.gif", show_label = False, show_share_button = False, show_download_button = False)
    gr.Markdown(welcome_message)  # Display the formatted welcome message
    with gr.Row():
        with gr.Column():
            # Show the topics on the left side, above a decorative image.
            gr.Markdown(topics)
            gr.Image("Image 8-1-24 at 2.42 PM.jpeg", show_label = False, show_share_button = False, show_download_button = False, height=294, width=500 )
    with gr.Row():
        with gr.Column():
            question = gr.Textbox(label="ׁ ׁ ꥓ ݄ ׁ 𖦹 ׅ 𓈒Your Question⋆𐙚₊˚⊹♡", placeholder="༘⋆🌷🫧What are You Wondering?💭₊˚ෆ")
            answer = gr.Textbox(label="˚ ༘˚Plai's Responseೀ⋆。", placeholder="ೀ🍨‧°Plai Your Way Here🎀⊹°。♡", interactive=False, lines=17)
            image_output=gr.Image(label="ꕤ*.゚⋅˚₊‧ Image Outputs Here୨୧ ‧₊˚ ⋅♡ ̆̈")
            submit_button = gr.Button("˚₊‧꒰აAsk Away໒꒱ ‧₊˚")
            # query_model returns (text, image); both outputs are bound here.
            submit_button.click(fn=query_model, inputs=question, outputs=[answer,image_output])

# Launch the Gradio app; share=True requests a public share link.
demo.launch(share=True)