"""Gradio chatbot that combines a Scrapy quote spider with a BERT classifier."""

import json

import gradio as gr
import torch
from transformers import BertForSequenceClassification, BertTokenizer

from quotes_spider import run_spider

# Load BERT model and tokenizer once at startup.
# NOTE(review): the classification head of plain 'bert-base-uncased' is
# randomly initialized (not fine-tuned), so class predictions are
# placeholders until a fine-tuned checkpoint is supplied — confirm.
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertForSequenceClassification.from_pretrained("bert-base-uncased")
model.eval()  # inference mode: disables dropout for deterministic outputs


def generate_response(message):
    """Return a reply for *message*.

    Canned greetings are answered directly; anything else is routed
    through the BERT classifier and mapped to a stock response.
    """
    normalized = message.lower()

    # Customize the greeting response.
    if normalized == "hi":
        return "Hi. How can I help you?"

    # Other conversational openers.
    greetings = ["hello", "hey", "how are you", "what's up"]
    if any(greet in normalized for greet in greetings):
        return "Hello! How can I assist you today?"

    # For other messages, use BERT for classification.
    # no_grad(): inference only — avoids building an autograd graph.
    inputs = tokenizer(message, return_tensors="pt", truncation=True)
    with torch.no_grad():
        outputs = model(**inputs)
    probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
    confidence, predicted_class = torch.max(probs, dim=-1)

    # .item() extracts the Python int from the 1-element tensor;
    # comparing the raw tensor to 0 only worked by accident.
    # Adjust the label mapping to your classification setup.
    if predicted_class.item() == 0:
        return "It seems like you're asking about something specific. Let me find that for you."
    return "I'm not sure how to help with that right now. Can you provide more details?"


def fetch_quotes():
    """Run the Scrapy spider and return a list of quote dicts.

    ``run_spider`` may return a JSON string or an already-parsed
    object; both are normalized here. Returns ``[]`` when the spider
    yields nothing or the payload is not valid JSON.
    """
    quotes = run_spider()
    if not quotes:
        return []
    if isinstance(quotes, str):
        try:
            return json.loads(quotes)
        except json.JSONDecodeError:
            # Malformed spider output: degrade gracefully instead of crashing.
            return []
    # Already a parsed list/dict — json.loads on it would raise TypeError.
    return quotes


def chatbot_response(message, url):
    """Combine a scraped quote (when *url* is given) with a chat reply.

    NOTE(review): *url* only gates whether the spider runs — it is never
    passed to ``run_spider``, so the spider ignores the user's URL.
    Confirm ``run_spider``'s signature and thread the URL through.
    """
    parts = []
    if url:
        quotes = fetch_quotes()
        if quotes:
            first = quotes[0]
            parts.append(f"I found a quote: \"{first['text']}\" by {first['author']}.")
    if message:
        parts.append(generate_response(message))
    return " ".join(parts).strip()


# Define the Gradio interface.
iface = gr.Interface(
    fn=chatbot_response,
    inputs=[
        gr.Textbox(lines=1, placeholder="Enter your message here...", label="Message"),
        gr.Textbox(lines=1, placeholder="Enter URL here...", label="URL"),
    ],
    outputs="text",
    title="Conversational Scrapy-BERT Chatbot",
)

if __name__ == "__main__":
    # Guard the launch so importing this module doesn't start a server.
    iface.launch()