File size: 4,684 Bytes
e392014
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
import gradio as gr
import re
import torch

# Use a much smaller, CPU-friendly model
# NOTE: DialoGPT-small is a conversational GPT-2 variant (~117M params),
# chosen so the app stays usable on CPU-only hosts.
MODEL_NAME = "microsoft/DialoGPT-small"  # Much lighter alternative

print("Loading lightweight model for CPU...")
print("This should load much faster...")

try:
    # Create pipeline directly - simpler approach
    # generator: transformers.TextGenerationPipeline on success, None on failure.
    # The rest of the app branches on `generator is None` (see classify_and_answer).
    generator = pipeline(
        "text-generation",
        model=MODEL_NAME,
        device=-1,  # Force CPU
        do_sample=True,   # sample instead of greedy decoding
        temperature=0.7   # mild randomness; forwarded to generate()
    )
    print("✅ Model loaded successfully!")
    
except Exception as e:
    # Broad catch is deliberate: any load failure (download, OOM, missing deps)
    # degrades the app to the rule-based classifier instead of crashing at import.
    print(f"❌ Error: {e}")
    print("Falling back to rule-based classification...")
    generator = None

def rule_based_classify(question):
    """Classify *question* as math, opinion, or factual via regex heuristics.

    Fallback path used when the language model is unavailable. Returns a
    ``(category, message)`` tuple. Math patterns are checked before opinion
    patterns, so a question matching both is labeled math; anything that
    matches neither set defaults to factual.
    """
    text = question.lower()

    # Heuristics for arithmetic / computation requests.
    math_patterns = (
        r'\d+\s*[\+\-\*\/\^]\s*\d+',
        r'what\s+is\s+\d+',
        r'calculate',
        r'solve',
        r'equation',
        r'math',
        r'plus|minus|times|divided|multiply',
    )

    # Heuristics for subjective / preference questions.
    opinion_patterns = (
        r'favorite|prefer|best|worst|opinion',
        r'what.*think|feel|believe',
        r'should\s+i|recommend',
        r'better|worse',
    )

    if any(re.search(pattern, text) for pattern in math_patterns):
        return "math", "This appears to be a math question. Please solve it step by step."

    if any(re.search(pattern, text) for pattern in opinion_patterns):
        return "opinion", "This is asking for an opinion or preference. Answers may vary by person."

    # No heuristic fired: assume the user wants factual information.
    return "factual", "This appears to be asking for factual information."

def classify_and_answer(question):
    """Classify *question* (math / opinion / factual) and return a formatted reply.

    Uses the text-generation pipeline when it loaded successfully; when the
    model is unavailable, or generation raises, falls back to
    ``rule_based_classify``.

    Returns a string of the form ``**Category:** ...\\n**Answer:** ...`` (or a
    prompt to enter a question when the input is empty/whitespace).
    """
    # Guard clause: empty or whitespace-only input.
    if not question or not question.strip():
        return "Please enter a valid question."

    # Model failed to load at startup — rule-based path only.
    if generator is None:
        category, answer = rule_based_classify(question)
        return f"**Category:** {category.title()}\n**Answer:** {answer}"

    # Short prompt nudging the small model to name the question's topic.
    prompt = f"Question: {question}\nThis question is asking about:"

    try:
        # BUG FIX: the original passed max_length=len(prompt) + 50, which
        # mixes characters (len of the prompt string) with tokens
        # (max_length's unit), so the generation budget was wrong whenever
        # character count != token count. max_new_tokens counts only the
        # continuation, in tokens, which is what was intended.
        output = generator(
            prompt,
            max_new_tokens=50,
            num_return_sequences=1,
            pad_token_id=generator.tokenizer.eos_token_id
        )

        generated_text = output[0]['generated_text']
        # The pipeline echoes the prompt; keep only the continuation.
        response = generated_text[len(prompt):].strip()

        # Map the free-form continuation to a category via keyword spotting.
        response_lower = response.lower()
        if any(word in response_lower for word in ['calculate', 'math', 'number', 'equation']):
            category = "math"
        elif any(word in response_lower for word in ['opinion', 'prefer', 'think', 'feel']):
            category = "opinion"
        else:
            category = "factual"

        # CONSISTENCY FIX: use the same markdown layout as both fallback
        # paths (the original returned plain "Category: ..." only here).
        return f"**Category:** {category.title()}\n**Answer:** {response}"

    except Exception as e:
        # Any generation failure degrades gracefully to the rule-based path.
        print(f"Generation error: {e}")
        category, answer = rule_based_classify(question)
        return f"**Category:** {category.title()}\n**Answer:** {answer}"

# Build simple Gradio UI
# Layout: one input textbox, a submit button, one read-only output textbox,
# plus clickable example questions. Both the button click and pressing Enter
# in the textbox route through classify_and_answer.
with gr.Blocks(title="Quick Question Classifier") as iface:
    gr.Markdown("# 🚀 Quick Question Classifier")
    gr.Markdown("*Lightweight CPU-friendly version*")
    
    # Free-text question entry.
    question_input = gr.Textbox(
        lines=2,
        placeholder="Enter your question here...",
        label="Your Question"
    )
    
    submit_btn = gr.Button("Classify Question", variant="primary")
    
    # Result display; interactive=False makes it read-only for the user.
    answer_output = gr.Textbox(
        lines=3,
        label="Classification Result",
        interactive=False
    )
    
    # Wire both triggers (button click and Enter key) to the same handler.
    submit_btn.click(classify_and_answer, question_input, answer_output)
    question_input.submit(classify_and_answer, question_input, answer_output)
    
    # Canned examples covering all three categories (factual, math, opinion).
    gr.Examples(
        examples=[
            ["What is the capital of France?"],
            ["What is 25 + 17?"],
            ["What's your favorite color?"],
            ["How many planets are in our solar system?"],
            ["What is 144 divided by 12?"]
        ],
        inputs=question_input
    )

# Script entry point: serve the UI locally on port 7860 (Gradio's default);
# share=False keeps it off the public gradio.live tunnel.
if __name__ == "__main__":
    print("Starting Gradio interface...")
    iface.launch(share=False, server_port=7860)