# (Hugging Face Space status header captured by the scrape — not code)
# Spaces: Sleeping
import gradio as gr
from sentence_transformers import SentenceTransformer, util
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import os

# Silence the tokenizers fork-parallelism warning; must be set before any
# tokenizer is used.
os.environ["TOKENIZERS_PARALLELISM"] = "false"

# Paths and model identifiers, kept at the top for easy configuration and maintenance.
filename = "output_topic_details.txt"  # Path to the file storing destress-specific details
retrieval_model_name = 'output/sentence-transformer-finetuned/'
# Load GPT-2 model and tokenizer used for response generation.
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")

# Conversation history, seeded with the system instructions.
system_message = "You are a comfort chatbot specialized in providing information on therapy, destressing activities, and student opportunities."
messages = [{"role": "system", "content": system_message}]
messages.append({
    "role": "system",
    "content": "Do not use Markdown Format. Do not include hashtags or asterisks"
})

# Load the sentence-transformer retrieval model used for semantic search.
# Failure is reported but not fatal so the rest of the app can still start.
try:
    retrieval_model = SentenceTransformer(retrieval_model_name)
    print("Models loaded successfully.")
except Exception as e:
    print(f"Failed to load models: {e}")
def load_and_preprocess_text(filename):
    """Read *filename* and return its non-blank lines, stripped of whitespace.

    Args:
        filename: Path to a UTF-8 text file, one knowledge segment per line.

    Returns:
        list[str]: The stripped, non-empty lines; an empty list when the
        file cannot be read or decoded, so the app can start without it.
    """
    try:
        with open(filename, 'r', encoding='utf-8') as file:
            segments = [line.strip() for line in file if line.strip()]
        print("Text loaded and preprocessed successfully.")
        return segments
    # Narrowed from a blanket Exception: only I/O and decoding problems
    # are expected here, and they degrade gracefully to "no knowledge base".
    except (OSError, UnicodeDecodeError) as e:
        print(f"Failed to load or preprocess text: {e}")
        return []
# Preload the knowledge-base segments once at startup.
segments = load_and_preprocess_text(filename)
def find_relevant_segment(user_query, segments):
    """Return the segment most semantically similar to *user_query*.

    Embeds the lowercased query and every segment with the retrieval model,
    then picks the cosine-similarity argmax.

    Args:
        user_query: The user's question.
        segments: Candidate knowledge-base strings.

    Returns:
        str: The best-matching segment, or "" when *segments* is empty or
        an error occurs, so callers can fall back to a generic answer.
    """
    # Guard: encoding an empty list / argmax over it would fail downstream.
    if not segments:
        return ""
    try:
        query_embedding = retrieval_model.encode(user_query.lower())
        # NOTE(review): segments are re-encoded on every call; consider
        # caching these embeddings at startup.
        segment_embeddings = retrieval_model.encode(segments)
        similarities = util.pytorch_cos_sim(query_embedding, segment_embeddings)[0]
        return segments[similarities.argmax()]
    except Exception as e:
        print(f"Error in finding relevant segment: {e}")
        return ""
def generate_response(user_query, relevant_segment):
    """Generate a GPT-2 reply grounded in *relevant_segment*.

    Bug fix: the original prompt dropped *user_query* entirely, so the model
    never saw the actual question — it was only shown the retrieved context.
    The prompt now contains both.

    Args:
        user_query: The user's question (now included in the prompt).
        relevant_segment: Retrieved context string to ground the answer.

    Returns:
        str: The decoded model output (GPT-2 echoes the prompt as a prefix),
        or an error message on failure.
    """
    try:
        user_message = (
            f"Question: {user_query}\n"
            f"Here's the information on your request: {relevant_segment}"
        )
        messages.append({"role": "user", "content": user_message})
        # Encode the prompt and generate a continuation.
        # NOTE(review): max_length counts prompt tokens too — a long segment
        # could exhaust the budget; consider max_new_tokens instead.
        input_ids = tokenizer.encode(user_message, return_tensors='pt')
        output = model.generate(input_ids, max_length=150, num_return_sequences=1)
        output_text = tokenizer.decode(output[0], skip_special_tokens=True)
        # Keep the assistant turn in the history for context.
        messages.append({"role": "assistant", "content": output_text})
        return output_text
    except Exception as e:
        print(f"Error in generating response: {e}")
        return f"Error in generating response: {e}"
def query_model(question):
    """Answer *question* via retrieval + GPT-2; entry point wired to the UI.

    Returns a welcome message for empty input, a fallback message when no
    relevant segment is found, otherwise the generated response.
    """
    # Robustness: treat whitespace-only input the same as empty input.
    if not question.strip():
        return "Welcome to CalmConnect! Ask me anything about destressing strategies or student opportunities. Feel free to talk to our online therapist!"
    relevant_segment = find_relevant_segment(question, segments)
    if not relevant_segment:
        return "Could not find specific information. Please refine your question or head to our resources page."
    return generate_response(question, relevant_segment)
# Placeholder page content — replace with the real copy and embeds.
# (The original file only had "goes here" comments, which left these names
# undefined and crashed the UI at startup with a NameError.)
iframe = ""            # HTML iframe content goes here
iframe2 = ""           # second HTML iframe content goes here
welcome_message = "Welcome to CalmConnect!"  # welcome banner markdown
topics = ""            # left-column topics markdown
topics2 = ""           # right-column topics markdown

# Setup the Gradio Blocks interface with custom layout components.
with gr.Blocks() as demo:
    gr.Image("CalmConnect.jpg", show_label=False, show_share_button=False, show_download_button=False)
    gr.Markdown(welcome_message)
    with gr.Row():
        with gr.Column():
            gr.Markdown(topics)
            gr.HTML(iframe)
            gr.HTML(iframe2)
        with gr.Column():
            gr.Markdown(topics2)
    with gr.Row():
        with gr.Column():
            question = gr.Textbox(label="You", placeholder="What do you want to talk to CalmBot about?")
            answer = gr.Textbox(label="CalmBot's Response :D", placeholder="CalmBot will respond here..", interactive=False, lines=20)
            submit_button = gr.Button("Submit")
            submit_button.click(fn=query_model, inputs=question, outputs=answer)
    # (Your buttons go here) — the original empty `with gr.Row():` block was
    # removed because an empty with-body is an IndentationError.

# Launch the Gradio app to allow user interaction.
demo.launch(share=True)