Scaryscar commited on
Commit
88f44aa
·
verified ·
1 Parent(s): fa5ecaa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +163 -96
app.py CHANGED
@@ -1,138 +1,205 @@
1
- from transformers import pipeline, AutoTokenizer
2
  import gradio as gr
3
  import torch
4
  import time
 
 
 
 
 
 
 
 
 
5
 
6
- # ===== RELIABLE MODEL LOADING =====
7
- def load_model():
8
- """Guaranteed model loading with multiple fallbacks"""
9
- device = 0 if torch.cuda.is_available() else -1
10
- dtype = torch.float16 if device == 0 else torch.float32
11
-
12
- # Try multiple models in order
13
- models = [
14
- ("mistralai/Mistral-7B-v0.1", {}), # Open-access
15
- ("google/gemma-2b-it", {"low_cpu_mem_usage": True}) # Gated
16
- ]
17
-
18
- for model_name, kwargs in models:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  try:
20
- tokenizer = AutoTokenizer.from_pretrained(model_name)
21
- model = pipeline(
22
- "text-generation",
23
- model=model_name,
24
- tokenizer=tokenizer,
25
- device=device,
26
- torch_dtype=dtype,
27
- **kwargs
28
- )
29
- # Test generation
30
- test_output = model("Test", max_new_tokens=10)[0]['generated_text']
31
- if test_output.strip():
32
- print(f"✅ Loaded {model_name}")
33
- return model, tokenizer
34
- except Exception as e:
35
- print(f"⚠️ Failed {model_name}: {str(e)}")
36
-
37
- raise RuntimeError("All models failed to load")
38
 
39
- # Initialize with error handling
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40
  try:
41
- model, tokenizer = load_model()
42
  except Exception as e:
43
- model = None
44
- print(f"🔴 Critical error: {str(e)}")
45
 
46
- # ===== GUARANTEED GENERATION =====
47
- def generate_answer(prompt):
48
- """Always returns a meaningful answer"""
 
49
  if not prompt.strip():
50
- return "Please enter a valid question"
51
 
52
- if model is None:
53
- return "System error - please try again later"
54
 
55
- try:
56
- start_time = time.time()
57
-
58
- # Robust prompt engineering
59
- full_prompt = f"""Provide a detailed step-by-step answer to this question:
60
-
61
- Question: {prompt}
62
-
63
- Answer in clear numbered steps:
64
- 1."""
65
-
66
- output = model(
67
- full_prompt,
68
- max_new_tokens=200,
69
- temperature=0.7,
70
- do_sample=True,
71
- pad_token_id=tokenizer.eos_token_id
72
- )[0]['generated_text']
73
-
74
- # Extract and format answer
75
- answer = output.split("Answer in clear numbered steps:")[-1]
76
- answer = answer.strip()
77
-
78
- # Ensure we got actual content
79
- if not answer or len(answer.split()) < 3:
80
- answer = "I couldn't generate a proper answer. Please try rephrasing your question."
81
-
82
- # Calculate metrics
83
- gen_time = time.time() - start_time
84
- word_count = len(answer.split())
85
-
86
- return f"""📚 Step-by-Step Answer:
87
 
88
- {answer}
89
 
90
- ⏱️ Generated in {gen_time:.2f}s | {word_count} words"""
91
 
92
- except Exception as e:
93
- return f"Error generating answer: {str(e)}"
94
 
95
- # ===== COMPLETE UI =====
96
- with gr.Blocks(theme=gr.themes.Default(), title="🔍 Expert Answer Bot") as demo:
97
- gr.Markdown("""<h1><center>Get Detailed Explanations</center></h1>""")
98
 
99
  with gr.Row():
100
- question = gr.Textbox(
101
  label="Your Question",
102
- placeholder="How does blockchain technology work?",
103
  lines=3
104
  )
105
 
106
  with gr.Row():
107
- submit_btn = gr.Button("Get Answer", variant="primary")
108
 
109
  with gr.Row():
110
  answer = gr.Textbox(
111
- label="Step-by-Step Explanation",
112
  lines=8,
113
  interactive=False
114
  )
115
 
116
- # Examples that are known to work
 
 
 
 
 
 
117
  gr.Examples(
118
  examples=[
119
- "Explain how photosynthesis works in plants",
120
- "Describe the steps to solve a quadratic equation",
121
- "How does a neural network learn? List the steps"
122
  ],
123
- inputs=question
124
  )
125
 
 
 
 
 
 
126
  submit_btn.click(
127
- fn=generate_answer,
128
- inputs=question,
129
- outputs=answer
 
 
 
 
130
  )
131
 
132
- # ===== FAILSAFE LAUNCH =====
133
  if __name__ == "__main__":
134
  demo.launch(
135
  server_name="0.0.0.0",
136
- server_port=7860,
137
- show_error=True
138
  )
 
 
1
  import gradio as gr
2
  import torch
3
  import time
4
+ import matplotlib.pyplot as plt
5
+ import numpy as np
6
+ import pandas as pd
7
+ import plotly.express as px
8
+ from transformers import pipeline, AutoTokenizer
9
+ from io import BytesIO
10
+ import base64
11
+ import warnings
12
+ warnings.filterwarnings("ignore")
13
 
14
+ # ===== CORE SYSTEM =====
15
class AISystem:
    """Loads a text-generation model (with fallbacks) and renders simple graphs."""

    # Sentinel returned by generate() on any failure.  load_models() compares
    # against it so an error message is never mistaken for a real completion.
    _GENERATION_FAILED = "I couldn't generate a response. Please try again."

    def __init__(self):
        # Pipeline device convention: GPU index 0 when available, -1 for CPU.
        self.device = 0 if torch.cuda.is_available() else -1
        # fp16 only makes sense on GPU; CPU inference stays in fp32.
        self.dtype = torch.float16 if self.device == 0 else torch.float32
        self.model = None
        self.tokenizer = None
        self.load_models()

    def load_models(self):
        """Smart model loading with multiple fallbacks.

        Tries each candidate in order and keeps the first one whose test
        generation produces real output.  Raises RuntimeError when every
        candidate fails.
        """
        models = [
            ("mistralai/Mistral-7B-v0.1", {}),                   # Open-access
            ("google/gemma-2b-it", {"low_cpu_mem_usage": True})  # Gated
        ]

        for model_name, kwargs in models:
            try:
                self.tokenizer = AutoTokenizer.from_pretrained(model_name)
                self.model = pipeline(
                    "text-generation",
                    model=model_name,
                    tokenizer=self.tokenizer,
                    device=self.device,
                    torch_dtype=self.dtype,
                    **kwargs
                )
                # Verify the model works.  Bug fix: the old check only counted
                # words, so the 8-word error message generate() returns on
                # failure passed verification and a broken model was accepted.
                test_output = self.generate("Test", simple=True)
                if (test_output
                        and test_output != self._GENERATION_FAILED
                        and len(test_output.split()) > 3):
                    print(f"✅ Loaded {model_name}")
                    return
            except Exception as e:
                print(f"⚠️ Failed {model_name}: {str(e)}")

        raise RuntimeError("All models failed to load")

    def generate(self, prompt, simple=False):
        """Guaranteed generation with error handling.

        simple=True sends the prompt verbatim (used by the load-time self
        test); otherwise the prompt is wrapped in a step-by-step template.
        Returns the completion text, or _GENERATION_FAILED on any error.
        """
        try:
            full_prompt = prompt if simple else f"""
Provide a detailed, step-by-step answer. Include graphs if requested.

Question: {prompt}

Answer:
1."""

            output = self.model(
                full_prompt,
                max_new_tokens=250,
                temperature=0.7,
                do_sample=True,
                pad_token_id=self.tokenizer.eos_token_id
            )[0]['generated_text']

            # Keep only the text after the last "Answer:" marker so the
            # echoed prompt is stripped from the completion.
            return output.split("Answer:")[-1].strip()
        except Exception:
            return self._GENERATION_FAILED

    def create_graph(self, data_type):
        """Render a PNG for a known data_type and return it as a data URI.

        Returns None for unknown types or on any plotting error.  Bug fix:
        an unrecognized data_type used to fall through and return a blank
        axes-only image instead of None.
        """
        try:
            if data_type not in ("linear", "quadratic", "random"):
                return None

            x = np.linspace(0, 10, 100)
            if data_type == "linear":
                plt.plot(x, x)
                plt.title("Linear Relationship")
            elif data_type == "quadratic":
                plt.plot(x, x**2)
                plt.title("Quadratic Relationship")
            else:  # random
                plt.scatter(x, np.random.rand(100))
                plt.title("Random Data")

            plt.xlabel("X-axis")
            plt.ylabel("Y-axis")
            buf = BytesIO()
            plt.savefig(buf, format='png')
            plt.close()  # release the implicit pyplot figure
            return f"data:image/png;base64,{base64.b64encode(buf.getvalue()).decode('utf-8')}"
        except Exception:
            return None
99
+
100
# Initialize system
try:
    ai_system = AISystem()
except Exception as e:
    # Keep the module importable even when model loading fails; the UI
    # layer checks for None and reports the failure to the user instead.
    ai_system = None
    print(f"🔴 System initialization failed: {str(e)}")
106
 
107
+ # ===== GRADIO INTERFACE =====
108
def process_query(prompt):
    """Answer a user question, optionally attaching a generated graph.

    Returns a (formatted_text, graph) pair where graph is a base64 PNG
    data URI or None when no graph was requested/produced.
    """
    start_time = time.time()

    # Guard clauses: blank input and a failed initialization short-circuit
    # before any model work happens.
    if not prompt.strip():
        return "Please enter a valid question", None

    if ai_system is None:
        return "System initialization failed - please check logs", None

    # Trigger phrases mapped to graph types; first match (insertion order)
    # wins, so the specific phrases shadow the generic "plot"/"chart".
    graph_keywords = {
        "linear graph": "linear",
        "quadratic graph": "quadratic",
        "random data": "random",
        "plot": "linear",
        "chart": "linear"
    }
    lowered = prompt.lower()
    graph_type = next(
        (g_type for keyword, g_type in graph_keywords.items() if keyword in lowered),
        None,
    )

    # Generate the answer, then the graph only when one was requested.
    response = ai_system.generate(prompt)
    graph = ai_system.create_graph(graph_type) if graph_type else None

    gen_time = time.time() - start_time
    formatted_response = f"""📊 Step-by-Step Answer:

{response}

⏱️ Generated in {gen_time:.2f} seconds"""

    return formatted_response, graph
 
149
 
150
# Build the UI.  Component creation order matters for layout, so the rows
# appear exactly as declared: question box, button, answer box, hidden image.
with gr.Blocks(theme=gr.themes.Soft(), title="🧠 AI Expert Assistant") as demo:
    gr.Markdown("""<h1><center>Intelligent Answer Engine</center></h1>""")

    with gr.Row():
        query = gr.Textbox(
            label="Your Question",
            placeholder="Ask anything... (e.g. 'Explain photosynthesis and show a linear graph')",
            lines=3
        )

    with gr.Row():
        submit_btn = gr.Button("Generate Answer", variant="primary")

    with gr.Row():
        answer = gr.Textbox(
            label="Detailed Explanation",
            lines=8,
            interactive=False
        )

    with gr.Row():
        # Hidden until a query actually produces a graph.
        graph_output = gr.Image(
            label="Generated Graph",
            visible=False
        )

    # Example queries
    gr.Examples(
        examples=[
            "Explain quantum computing and show a linear graph",
            "Describe the water cycle with a quadratic graph",
            "How does machine learning work? Show random data"
        ],
        inputs=query
    )

    def _toggle_graph(response, graph):
        """Reveal the image component only when a graph is present."""
        image_state = gr.update(visible=True, value=graph) if graph else gr.update(visible=False)
        return response, image_state

    # Run the query, then toggle the graph's visibility from its result.
    submit_btn.click(
        fn=process_query,
        inputs=query,
        outputs=[answer, graph_output]
    ).then(
        fn=_toggle_graph,
        inputs=[answer, graph_output],
        outputs=[answer, graph_output]
    )
200
 
 
201
# Entry point: start the Gradio server.
if __name__ == "__main__":
    demo.launch(
        # Bind to all interfaces — presumably for containerized hosting
        # (7860 is the conventional Spaces port); confirm deployment target.
        server_name="0.0.0.0",
        server_port=7860
    )