# Hugging Face Space: AI chat assistant (Gradio UI + DialoGPT text generation).
import random
import re
from typing import Optional

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
class ChatAssistant:
    """Conversational assistant backed by a Hugging Face causal-LM pipeline.

    Loads ``microsoft/DialoGPT-medium`` on construction and answers free-form
    user messages. Also recognises simple self-introductions ("my name is ...")
    and greets the user by name instead of calling the model.
    """

    # Introduction patterns, compiled once at class-definition time instead of
    # being recompiled on every incoming message.
    _NAME_PATTERNS = tuple(
        re.compile(p)
        for p in (
            r"my name is (\w+)",
            r"i'm (\w+)",
            r"i am (\w+)",
            r"call me (\w+)",
        )
    )

    def __init__(self) -> None:
        self.name = "AI Assistant"   # display name of the bot
        self.user_name = ""          # filled in once the user introduces themselves
        self.model_loaded = False    # True after a successful load_model()
        self.generator = None        # transformers text-generation pipeline
        self.tokenizer = None
        self.model = None
        self.load_model()

    def load_model(self) -> bool:
        """Load tokenizer + model and build the generation pipeline.

        Returns:
            True on success (or if already loaded), False on failure.

        Failures are logged and swallowed deliberately so the app can still
        start and report "technical difficulties" instead of crashing at
        import time.
        """
        if self.model_loaded:
            return True
        try:
            model_name = "microsoft/DialoGPT-medium"
            self.tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.model = AutoModelForCausalLM.from_pretrained(
                model_name,
                # fp16 only makes sense on GPU; CPU inference stays in fp32.
                torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
                device_map="auto" if torch.cuda.is_available() else None,
                low_cpu_mem_usage=True,
            )
            # DialoGPT ships without a pad token; reuse EOS for padding.
            if self.tokenizer.pad_token is None:
                self.tokenizer.pad_token = self.tokenizer.eos_token
            self.generator = pipeline(
                "text-generation",
                model=self.model,
                tokenizer=self.tokenizer,
                device=0 if torch.cuda.is_available() else -1,
                return_full_text=False,
            )
            self.model_loaded = True
            return True
        except Exception as e:
            print(f"Could not load AI model: {str(e)}")
            return False

    def generate_response(self, query: str) -> str:
        """Generate a model reply for *query*.

        Always returns a non-None string: a canned apology when the model is
        unavailable, and a fallback message when generation fails or produces
        an empty result (the original code could implicitly return None on
        the empty-result path).
        """
        if not self.model_loaded:
            return "Sorry, I'm having technical difficulties. Please try again later."
        try:
            prompt = (
                "The following is a conversation with an AI assistant. "
                "The assistant is helpful, knowledgeable, and provides detailed answers.\n"
                f"User: {query}\n"
                "AI:"
            )
            response = self.generator(
                prompt,
                max_new_tokens=300,
                do_sample=True,
                temperature=0.7,
                top_p=0.9,
                pad_token_id=self.tokenizer.eos_token_id,
                repetition_penalty=1.1,
                no_repeat_ngram_size=3,
            )
            if response and len(response) > 0:
                text = response[0]["generated_text"].strip()
                if text:
                    return text
        except Exception as e:
            print(f"AI generation error: {e}")
        return "I couldn't generate a response. Please try asking differently."

    def process_message(self, message: str) -> str:
        """Route a user message: name introductions first, then the LM."""
        name_response = self.get_user_name(message)
        if name_response:
            return name_response
        return self.generate_response(message)

    def get_user_name(self, message: str) -> Optional[str]:
        """Detect a self-introduction and greet the user by name.

        Returns the greeting string, or None if no introduction matched.
        NOTE(review): patterns such as ``i am (\\w+)`` also match phrases like
        "i am looking" — confirm whether stricter matching is wanted before
        tightening (kept as-is to preserve behavior).
        """
        lowered = message.lower()
        for pattern in self._NAME_PATTERNS:
            match = pattern.search(lowered)
            if match:
                self.user_name = match.group(1).capitalize()
                return f"Nice to meet you, {self.user_name}! How can I help you today?"
        return None
# Initialize the assistant once at import time.
# NOTE: this downloads/loads the model eagerly, so module import may be slow.
assistant = ChatAssistant()
def chat_response(message, chat_history):
    """Gradio handler: append the assistant's reply to the chat history.

    Args:
        message: raw textbox contents (may be None or whitespace-only).
        chat_history: list of (user, bot) message tuples.

    Returns:
        (updated_history, "") — the empty string clears the input textbox.

    Fix: guard against ``message`` being None (Gradio can pass None when the
    textbox is cleared programmatically); the original ``message.strip()``
    would raise AttributeError in that case.
    """
    if not message or not message.strip():
        return chat_history, ""
    bot_message = assistant.process_message(message)
    chat_history.append((message, bot_message))
    return chat_history, ""
def greet():
    """Return the initial chat history: a single bot greeting, no user turn."""
    greetings = (
        "Hello! I'm your AI assistant. How can I help you today?",
        "Hi there! What would you like to know?",
        "Welcome! I'm ready to answer your questions.",
    )
    return [(None, random.choice(greetings))]
# Create Gradio interface
def create_interface():
    """Build and return the Gradio Blocks chat UI (does not launch it)."""
    with gr.Blocks(title="AI Assistant", theme=gr.themes.Soft()) as demo:
        chat_window = gr.Chatbot(value=greet(), height=500, label="AI Assistant")
        with gr.Row():
            user_box = gr.Textbox(
                label="Type your message",
                placeholder="Ask me anything...",
                lines=2,
            )
            send_btn = gr.Button("Send", variant="primary")
            reset_btn = gr.Button("Clear Chat")
        # The Enter key and the Send button share the same handler; both
        # update the chat window and clear the textbox.
        user_box.submit(chat_response, [user_box, chat_window], [chat_window, user_box])
        send_btn.click(chat_response, [user_box, chat_window], [chat_window, user_box])
        # Clearing restores a fresh greeting and empties the textbox.
        reset_btn.click(lambda: (greet(), ""), outputs=[chat_window, user_box])
    return demo
# Launch the interface
if __name__ == "__main__":
    interface = create_interface()
    # share=True requests a public gradio.live URL in addition to localhost.
    interface.launch(share=True)