# Hugging Face Space app: quick question classifier (lightweight CPU demo).
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
import gradio as gr
import re
import torch

# Small conversational model so startup and inference stay usable on CPU.
MODEL_NAME = "microsoft/DialoGPT-small"  # Much lighter alternative

print("Loading lightweight model for CPU...")
print("This should load much faster...")

try:
    # One-call pipeline construction; device=-1 pins execution to the CPU.
    generator = pipeline(
        "text-generation",
        model=MODEL_NAME,
        device=-1,  # Force CPU
        do_sample=True,
        temperature=0.7,
    )
    print("✅ Model loaded successfully!")
except Exception as e:
    # Any load failure (network, memory, missing weights) degrades the app
    # to the rule-based classifier instead of crashing at import time.
    print(f"❌ Error: {e}")
    print("Falling back to rule-based classification...")
    generator = None
def rule_based_classify(question):
    """Classify *question* as math / opinion / factual via keyword regexes.

    Fallback classifier used when the language model is unavailable or a
    generation call fails.

    Args:
        question: Raw user question text (any casing).

    Returns:
        Tuple ``(category, answer)`` where ``category`` is one of ``"math"``,
        ``"opinion"``, ``"factual"`` and ``answer`` is a canned response.
    """
    question_lower = question.lower()

    # Math cues: literal arithmetic expressions or math vocabulary.
    math_patterns = [
        r'\d+\s*[\+\-\*\/\^]\s*\d+',
        r'what\s+is\s+\d+',
        r'calculate',
        r'solve',
        r'equation',
        r'math',
        r'plus|minus|times|divided|multiply',
    ]
    # Opinion cues: preference / judgement vocabulary.
    opinion_patterns = [
        r'favorite|prefer|best|worst|opinion',
        # BUG FIX: the original r'what.*think|feel|believe' parsed as
        # (what.*think)|feel|believe because '|' binds loosest, so a bare
        # "feel" or "believe" anywhere forced the opinion branch. Group the
        # alternatives so all three require a preceding "what".
        r'what.*(?:think|feel|believe)',
        r'should\s+i|recommend',
        r'better|worse',
    ]

    # Math patterns win over opinion patterns (checked first).
    for pattern in math_patterns:
        if re.search(pattern, question_lower):
            return "math", "This appears to be a math question. Please solve it step by step."
    for pattern in opinion_patterns:
        if re.search(pattern, question_lower):
            return "opinion", "This is asking for an opinion or preference. Answers may vary by person."
    # Nothing matched: treat as a factual lookup.
    return "factual", "This appears to be asking for factual information."
def classify_and_answer(question):
    """Classify *question* and return a markdown-formatted result string.

    Uses the global ``generator`` pipeline when it loaded successfully;
    otherwise — or on any generation error — falls back to
    ``rule_based_classify``.

    Args:
        question: User-entered question text (may be None or blank).

    Returns:
        A string of the form ``**Category:** ...\\n**Answer:** ...``, or a
        prompt to enter a valid question when the input is empty.
    """
    if not question or question.strip() == "":
        return "Please enter a valid question."

    # Model never loaded at startup: rule-based path only.
    if generator is None:
        category, answer = rule_based_classify(question)
        return f"**Category:** {category.title()}\n**Answer:** {answer}"

    # Keep the prompt minimal -- DialoGPT-small is a weak completer.
    prompt = f"Question: {question}\nThis question is asking about:"
    try:
        output = generator(
            prompt,
            # BUG FIX: the original passed max_length=len(prompt) + 50,
            # which mixes characters (len of the string) with tokens
            # (max_length's unit). max_new_tokens bounds the continuation
            # length directly and unambiguously.
            max_new_tokens=50,
            num_return_sequences=1,
            pad_token_id=generator.tokenizer.eos_token_id,
        )
        generated_text = output[0]['generated_text']
        # Strip the echoed prompt; keep only the model's continuation.
        response = generated_text[len(prompt):].strip()

        # Bucket the free-text continuation by keyword.
        response_lower = response.lower()
        if any(word in response_lower for word in ['calculate', 'math', 'number', 'equation']):
            category = "math"
        elif any(word in response_lower for word in ['opinion', 'prefer', 'think', 'feel']):
            category = "opinion"
        else:
            category = "factual"
        # CONSISTENCY FIX: use the same bold markdown as both fallback paths
        # (the original emitted plain "Category:" only on this branch).
        return f"**Category:** {category.title()}\n**Answer:** {response}"
    except Exception as e:
        print(f"Generation error: {e}")
        # Degrade gracefully: any runtime failure uses the rule-based path.
        category, answer = rule_based_classify(question)
        return f"**Category:** {category.title()}\n**Answer:** {answer}"
# --- Gradio UI -------------------------------------------------------------
with gr.Blocks(title="Quick Question Classifier") as demo:
    gr.Markdown("# 🚀 Quick Question Classifier")
    gr.Markdown("*Lightweight CPU-friendly version*")

    question_box = gr.Textbox(
        label="Your Question",
        placeholder="Enter your question here...",
        lines=2,
    )
    classify_button = gr.Button("Classify Question", variant="primary")
    result_box = gr.Textbox(
        label="Classification Result",
        lines=3,
        interactive=False,
    )

    # Same handler fires on the button click and on Enter in the textbox.
    for trigger in (classify_button.click, question_box.submit):
        trigger(classify_and_answer, question_box, result_box)

    gr.Examples(
        examples=[
            ["What is the capital of France?"],
            ["What is 25 + 17?"],
            ["What's your favorite color?"],
            ["How many planets are in our solar system?"],
            ["What is 144 divided by 12?"],
        ],
        inputs=question_box,
    )

if __name__ == "__main__":
    print("Starting Gradio interface...")
    demo.launch(share=False, server_port=7860)