hari7261 commited on
Commit
54df86c
·
verified ·
1 Parent(s): 9d9ab83

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +85 -474
app.py CHANGED
@@ -1,477 +1,88 @@
1
- import re
2
- import random
3
  import gradio as gr
4
- import json
5
- import os
6
- from typing import Dict, List, Any
7
-
8
- # Try to import AI libraries
9
- try:
10
- import openai
11
- OPENAI_AVAILABLE = True
12
- except ImportError:
13
- OPENAI_AVAILABLE = False
14
-
15
- try:
16
- from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
17
- import torch
18
- TRANSFORMERS_AVAILABLE = True
19
- except ImportError:
20
- TRANSFORMERS_AVAILABLE = False
21
-
22
- # Try to import sentence transformers for semantic search
23
- try:
24
- from sentence_transformers import SentenceTransformer
25
- import numpy as np
26
- SENTENCE_TRANSFORMERS_AVAILABLE = True
27
- except ImportError:
28
- SENTENCE_TRANSFORMERS_AVAILABLE = False
29
-
30
- class CodeGenius:
31
- def __init__(self):
32
- self.name = "CodeGenius"
33
- self.user_name = ""
34
- self.conversation_history = []
35
- self.model_loaded = False
36
- self.generator = None
37
- self.tokenizer = None
38
- self.model = None
39
- self.embedding_model = None
40
-
41
- # Load programming knowledge base
42
- self.programming_data = self.load_programming_data()
43
- self.knowledge_base = self.prepare_knowledge_base()
44
-
45
- # Initialize embedding model for semantic search
46
- self.init_embedding_model()
47
-
48
- # Feature flags (env driven)
49
- self.use_local_llm = os.getenv("USE_LOCAL_LLM", "0") == "1"
50
-
51
- def load_programming_data(self) -> Dict:
52
- """Load programming knowledge from JSON file"""
53
- try:
54
- json_path = os.path.join(os.path.dirname(__file__), 'programming_data.json')
55
- with open(json_path, 'r', encoding='utf-8') as file:
56
- return json.load(file)
57
- except FileNotFoundError:
58
- print("Programming data file not found. Using basic data.")
59
- return self.get_fallback_data()
60
- except json.JSONDecodeError:
61
- print("Error reading programming data. Using basic data.")
62
- return self.get_fallback_data()
63
-
64
- def get_fallback_data(self) -> Dict:
65
- """Fallback data if JSON file is not available"""
66
- return {
67
- "languages": {
68
- "Python": {
69
- "paradigm": ["Object-oriented", "Imperative", "Functional", "Procedural"],
70
- "typing": "Dynamic",
71
- "use_cases": ["Web development", "Data science", "AI/ML", "Automation"],
72
- "common_errors": [
73
- {"name": "IndentationError", "solution": "Ensure consistent use of tabs or spaces"},
74
- {"name": "NameError", "solution": "Check if variable is defined before use"}
75
- ],
76
- "optimization": ["Use list comprehensions", "Avoid global variables", "Use built-in functions"]
77
- },
78
- "JavaScript": {
79
- "paradigm": ["Event-driven", "Functional", "Object-oriented"],
80
- "typing": "Dynamic",
81
- "use_cases": ["Web development", "Frontend", "Backend", "Mobile apps"],
82
- "common_errors": [
83
- {"name": "TypeError", "solution": "Check variable types before operations"},
84
- {"name": "ReferenceError", "solution": "Ensure variables/functions are in scope"}
85
- ],
86
- "optimization": ["Minimize DOM access", "Debounce events", "Use Web Workers"]
87
- }
88
- },
89
- "concepts": {
90
- "OOP": {
91
- "definition": "Object-oriented programming organizes software design around objects rather than functions and logic",
92
- "principles": ["Encapsulation", "Inheritance", "Polymorphism", "Abstraction"]
93
- },
94
- "Functional Programming": {
95
- "definition": "Programming paradigm that treats computation as evaluation of mathematical functions",
96
- "key_features": ["Pure functions", "Immutability", "First-class functions"]
97
- }
98
- }
99
- }
100
-
101
- def prepare_knowledge_base(self) -> List[Dict]:
102
- """Prepare searchable knowledge base from programming data"""
103
- knowledge_items = []
104
-
105
- # Process languages data
106
- for lang_name, lang_data in self.programming_data.get('languages', {}).items():
107
- # Basic language info
108
- knowledge_items.append({
109
- 'type': 'language_info',
110
- 'language': lang_name,
111
- 'content': f"{lang_name} programming language: Paradigms - {', '.join(lang_data.get('paradigm', []))}, "
112
- f"Typing - {lang_data.get('typing', 'N/A')}, "
113
- f"Use cases - {', '.join(lang_data.get('use_cases', []))}",
114
- 'data': lang_data
115
- })
116
-
117
- # Common errors
118
- for error in lang_data.get('common_errors', []):
119
- knowledge_items.append({
120
- 'type': 'error',
121
- 'language': lang_name,
122
- 'content': f"{error.get('name', 'Unknown')} in {lang_name}: "
123
- f"Solution - {error.get('solution', 'N/A')}",
124
- 'data': error
125
- })
126
-
127
- # Optimization tips
128
- for tip in lang_data.get('optimization', []):
129
- knowledge_items.append({
130
- 'type': 'optimization',
131
- 'language': lang_name,
132
- 'content': f"Optimization tip for {lang_name}: {tip}",
133
- 'data': tip
134
- })
135
-
136
- # Process programming concepts
137
- for concept_name, concept_data in self.programming_data.get('concepts', {}).items():
138
- knowledge_items.append({
139
- 'type': 'concept',
140
- 'content': f"{concept_name}: {concept_data.get('definition', 'N/A')}. "
141
- f"Key aspects: {', '.join(concept_data.get('principles', concept_data.get('key_features', [])))}",
142
- 'data': concept_data
143
- })
144
-
145
- return knowledge_items
146
-
147
- def init_embedding_model(self):
148
- """Initialize embedding model for semantic search"""
149
- if SENTENCE_TRANSFORMERS_AVAILABLE:
150
- try:
151
- self.embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
152
- # Pre-compute embeddings for knowledge base
153
- self.knowledge_embeddings = self.embedding_model.encode([item['content'] for item in self.knowledge_base])
154
- except Exception as e:
155
- print(f"Failed to load embedding model: {e}")
156
- self.embedding_model = None
157
- else:
158
- self.embedding_model = None
159
-
160
- def semantic_search(self, query: str, top_k: int = 3) -> List[Dict]:
161
- """Perform semantic search on knowledge base"""
162
- if self.embedding_model is None:
163
- return self.fallback_search(query, top_k)
164
-
165
- try:
166
- query_embedding = self.embedding_model.encode([query])
167
- similarities = np.dot(query_embedding, self.knowledge_embeddings.T)[0]
168
- top_indices = np.argsort(similarities)[-top_k:][::-1]
169
-
170
- results = []
171
- for idx in top_indices:
172
- if similarities[idx] > 0.3: # Threshold for relevance
173
- results.append({
174
- 'item': self.knowledge_base[idx],
175
- 'score': float(similarities[idx])
176
- })
177
-
178
- return results
179
- except Exception as e:
180
- print(f"Semantic search error: {e}")
181
- return self.fallback_search(query, top_k)
182
-
183
- def fallback_search(self, query: str, top_k: int = 3) -> List[Dict]:
184
- """Fallback search using keyword matching"""
185
- query_words = set(query.lower().split())
186
- results = []
187
-
188
- for item in self.knowledge_base:
189
- content_words = set(item['content'].lower().split())
190
- overlap = len(query_words.intersection(content_words))
191
- if overlap > 0:
192
- results.append({
193
- 'item': item,
194
- 'score': overlap / len(query_words)
195
- })
196
-
197
- results.sort(key=lambda x: x['score'], reverse=True)
198
- return results[:top_k]
199
-
200
- def load_model(self):
201
- """Load AI model for advanced queries"""
202
- if self.model_loaded:
203
- return True
204
-
205
- # Only attempt heavy model if explicitly enabled
206
- if TRANSFORMERS_AVAILABLE and self.use_local_llm:
207
- try:
208
- # Use a code-specific model
209
- model_name = "bigcode/starcoder2-7b"
210
-
211
- self.tokenizer = AutoTokenizer.from_pretrained(model_name)
212
- self.model = AutoModelForCausalLM.from_pretrained(
213
- model_name,
214
- torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
215
- device_map="auto" if torch.cuda.is_available() else None,
216
- low_cpu_mem_usage=True
217
- )
218
-
219
- # Add pad token if not present
220
- if self.tokenizer.pad_token is None:
221
- self.tokenizer.pad_token = self.tokenizer.eos_token
222
-
223
- self.generator = pipeline(
224
- "text-generation",
225
- model=self.model,
226
- tokenizer=self.tokenizer,
227
- device=0 if torch.cuda.is_available() else -1,
228
- return_full_text=False
229
- )
230
-
231
- self.model_loaded = True
232
- print("✅ AI model loaded successfully!")
233
- return True
234
-
235
- except Exception as e:
236
- print(f"⚠️ Could not load AI model: {str(e)}")
237
- return False
238
- else:
239
- if not TRANSFORMERS_AVAILABLE and self.use_local_llm:
240
- print("🔧 Install transformers and torch for AI features")
241
- return False
242
-
243
- def generate_ai_response(self, query: str, context: str = "", code: str = "") -> str:
244
- """Generate conversational AI response using programming knowledge"""
245
- if not self.model_loaded:
246
- if not self.load_model():
247
- return self.generate_openai_style_response(query, context, code)
248
-
249
- try:
250
- # Create a conversational prompt for code assistance
251
- system_prompt = """You are an expert programming assistant with years of experience helping developers.
252
- Your job is to provide helpful, accurate code solutions, explanations, and optimizations.
253
- Provide clear, concise answers with code examples when appropriate.
254
- Explain complex concepts in simple terms and always consider best practices."""
255
-
256
- user_prompt = f"""Based on this programming knowledge: {context}
257
- And this provided code: {code}
258
- Please answer this developer's question: {query}
259
- Provide the best solution with explanation and consider edge cases."""
260
-
261
- # Generate response
262
- full_prompt = f"{system_prompt}\n\nUser: {user_prompt}\nAssistant:"
263
-
264
- response = self.generator(
265
- full_prompt,
266
- max_new_tokens=300,
267
- do_sample=True,
268
- temperature=0.7,
269
- top_p=0.9,
270
- pad_token_id=self.tokenizer.eos_token_id,
271
- repetition_penalty=1.1,
272
- no_repeat_ngram_size=3
273
- )
274
-
275
- if response and len(response) > 0:
276
- generated_text = response[0]["generated_text"]
277
- # Extract only the assistant's response
278
- if "Assistant:" in generated_text:
279
- ai_response = generated_text.split("Assistant:")[-1].strip()
280
- if len(ai_response) > 20:
281
- return ai_response
282
-
283
- except Exception as e:
284
- print(f"AI generation error: {e}")
285
-
286
- # Fallback to OpenAI-style response
287
- return self.generate_openai_style_response(query, context, code)
288
-
289
- def generate_openai_style_response(self, query: str, context: str, code: str) -> str:
290
- """Generate OpenAI-style conversational response using template"""
291
- query_lower = query.lower()
292
-
293
- # Extract key information from context
294
- lang_mentioned = None
295
- for lang in ['python', 'javascript', 'java', 'c++', 'go']:
296
- if lang in query_lower or lang in context.lower():
297
- lang_mentioned = lang
298
- break
299
-
300
- if lang_mentioned:
301
- lang_data = self.programming_data.get('languages', {}).get(lang_mentioned.capitalize(), {})
302
-
303
- if 'error' in query_lower or 'bug' in query_lower or 'fix' in query_lower:
304
- return self.generate_error_response(lang_mentioned, lang_data, query, code)
305
- elif 'optimiz' in query_lower or 'improve' in query_lower or 'speed' in query_lower:
306
- return self.generate_optimization_response(lang_mentioned, lang_data, code)
307
- elif 'explain' in query_lower or 'how does' in query_lower:
308
- return self.generate_explanation_response(lang_mentioned, lang_data, code)
309
- elif 'generate' in query_lower or 'write' in query_lower or 'create' in query_lower:
310
- return self.generate_code_response(lang_mentioned, lang_data, query)
311
- else:
312
- return self.generate_general_lang_response(lang_mentioned, lang_data, query)
313
-
314
- return self.generate_general_programming_response(query, context, code)
315
-
316
- def generate_error_response(self, lang: str, lang_data: dict, query: str, code: str) -> str:
317
- """Generate detailed error explanation and solution"""
318
- common_errors = lang_data.get('common_errors', [])
319
-
320
- bullets = ", ".join([e.get('name', 'Unknown') for e in common_errors[:5]]) or "syntax and runtime issues"
321
- steps = [
322
- "Reproduce the error and capture the full traceback/message",
323
- "Locate the failing line and inspect variables/inputs",
324
- "Minimize to a small reproducible example",
325
- "Apply a fix, then add/adjust a test to prevent regressions",
326
- ]
327
- suggestions = [f"{e.get('name', 'Error')}: {e.get('solution', '')}" for e in common_errors[:5]]
328
- response = (
329
- f"Debugging {lang}:\n"
330
- f"Common issues: {bullets}.\n\n"
331
- f"Code (context):\n{(code or '# no code provided').strip()}\n\n"
332
- f"Steps:\n- " + "\n- ".join(steps) + "\n\n"
333
- + ("Hints:\n- " + "\n- ".join(suggestions) if suggestions else "")
334
- )
335
- return response
336
-
337
- def generate_optimization_response(self, lang: str, lang_data: dict, code: str) -> str:
338
- tips = lang_data.get('optimization', [])
339
- generic = [
340
- "Profile first; optimize hot paths, not guesses",
341
- "Prefer algorithms/data structures with better complexity",
342
- "Avoid unnecessary allocations and copies",
343
- "Cache expensive results where safe",
344
- ]
345
- body = (
346
- f"Performance tips for {lang}:\n- " + "\n- ".join(tips + generic[: max(0, 4 - len(tips))]) +
347
- (f"\n\nCode (context):\n{code.strip()}" if code else "")
348
- )
349
- return body
350
-
351
- def generate_explanation_response(self, lang: str, lang_data: dict, code: str) -> str:
352
- if not code:
353
- return (
354
- f"Explain {lang} code: provide the snippet for a targeted walkthrough.\n"
355
- f"Meanwhile, key {lang} concepts: paradigms={', '.join(lang_data.get('paradigm', []))}, typing={lang_data.get('typing', 'n/a')}."
356
- )
357
- outline = [
358
- "High-level: What does this code do?",
359
- "Inputs/outputs: parameters, return values, side effects",
360
- "Control flow: loops, branches, error handling",
361
- "Data structures and complexity",
362
- ]
363
- return (
364
- f"Explanation ({lang}):\n"
365
- f"Code:\n{code.strip()}\n\n"
366
- f"Consider:\n- " + "\n- ".join(outline)
367
- )
368
-
369
- def generate_code_response(self, lang: str, lang_data: dict, query: str) -> str:
370
- # Provide a minimal idiomatic template per language
371
- templates = {
372
- 'python': (
373
- "# minimal CLI template\n"
374
- "import sys\n\n"
375
- "def main(argv: list[str]) -> int:\n"
376
- " # TODO: implement\n"
377
- " print('Hello from CodeGenius')\n"
378
- " return 0\n\n"
379
- "if __name__ == '__main__':\n"
380
- " raise SystemExit(main(sys.argv[1:]))\n"
381
- ),
382
- 'javascript': (
383
- "// minimal Node.js module template\n"
384
- "export function main(args = []) {\n"
385
- " console.log('Hello from CodeGenius');\n"
386
- "}\n"
387
- ),
388
- 'java': (
389
- "// minimal Java app template\n"
390
- "public class App {\n"
391
- " public static void main(String[] args) {\n"
392
- " System.out.println(\"Hello from CodeGenius\");\n"
393
- " }\n"
394
- "}\n"
395
- )
396
- }
397
- key = lang.lower()
398
- snippet = templates.get(key, "// Provide more detail to generate specific code.")
399
- return f"Generated starter for {lang}:\n{snippet}"
400
-
401
- def generate_general_lang_response(self, lang: str, lang_data: dict, query: str) -> str:
402
- paradigms = ', '.join(lang_data.get('paradigm', []))
403
- use_cases = ', '.join(lang_data.get('use_cases', []))
404
- typing = lang_data.get('typing', 'n/a')
405
- pitfalls = ', '.join([e.get('name', '') for e in lang_data.get('common_errors', [])[:5]])
406
- return (
407
- f"{lang.capitalize()} overview: paradigms={paradigms}; typing={typing}; typical uses={use_cases}.\n"
408
- f"Watch for: {pitfalls}.\n"
409
- f"Query: {query}"
410
- )
411
-
412
- def generate_general_programming_response(self, query: str, context: str, code: str) -> str:
413
- parts = []
414
- if context:
415
- parts.append(f"Relevant knowledge: {context}")
416
- if code:
417
- parts.append(f"Code context:\n{code.strip()}")
418
- parts.append(
419
- "Approach: clarify requirements, choose data structures, write small tests, implement incrementally, and profile if performance matters."
420
- )
421
- return f"Answering: {query}\n" + "\n\n".join(parts)
422
-
423
- def answer(self, query: str, code: str = "") -> str:
424
- """Top-level entry: perform semantic search, then answer."""
425
- # Build context from semantic search
426
- top = self.semantic_search(query, top_k=3)
427
- context_str = " | ".join([t['item']['content'] for t in top]) if top else ""
428
- # Use template or local LLM if enabled
429
- return self.generate_ai_response(query, context_str, code)
430
-
431
-
432
- # -------- Simple UI / Entrypoint --------
433
- def _build_gradio_ui(genius: CodeGenius):
434
- with gr.Blocks(title="CodeGenius") as demo:
435
- gr.Markdown("# CodeGenius\nAn AI-powered programming helper (lightweight mode by default).")
436
- chatbot = gr.Chatbot(height=350)
437
- with gr.Row():
438
- msg = gr.Textbox(label="Ask a question", scale=3)
439
- code_in = gr.Textbox(label="Optional code context", lines=8)
440
- clear = gr.Button("Clear")
441
-
442
- state = gr.State([])
443
-
444
- def respond(user_message, chat_history, code_text):
445
- if not user_message:
446
- return chat_history or [], chat_history or []
447
- reply = genius.answer(user_message, code_text or "")
448
- chat_history = (chat_history or []) + [[user_message, reply]]
449
- return chat_history, chat_history
450
-
451
- msg.submit(respond, [msg, chatbot, code_in], [chatbot, chatbot])
452
- clear.click(lambda: ([], []), None, [chatbot, chatbot], queue=False)
453
- return demo
454
-
455
-
456
- def main():
457
- genius = CodeGenius()
458
- if os.getenv("RUN_UI", "0") == "1":
459
- demo = _build_gradio_ui(genius)
460
- demo.launch(server_name="127.0.0.1", server_port=int(os.getenv("PORT", "7860")))
461
- return
462
- # CLI mode
463
- print("CodeGenius (CLI). Type 'exit' to quit.")
464
- while True:
465
- try:
466
- q = input("You> ").strip()
467
- except (EOFError, KeyboardInterrupt):
468
- print()
469
- break
470
- if q.lower() in {"exit", "quit"}:
471
- break
472
- ans = genius.answer(q)
473
- print(f"Bot> {ans}\n")
474
-
475
 
476
  if __name__ == "__main__":
477
- main()
 
1
+ import torch
 
2
  import gradio as gr
3
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
4
+
5
# ---------- CONFIG ----------
# Hugging Face model id loaded at import time. NOTE(review): a 7B model
# needs a GPU with ~14 GB VRAM in fp16 (or a lot of RAM in fp32) — confirm
# the deployment hardware, or swap in a smaller model.
MODEL_NAME = "mistralai/Mistral-7B-Instruct-v0.2"  # Change to smaller model if needed

# Preload model and tokenizer (module-level side effect: downloads weights
# on first run and blocks until loading finishes).
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    # Half precision only when a CUDA device is available; fp32 on CPU.
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
    # presumably relies on `accelerate` for automatic device placement — TODO confirm it is installed
    device_map="auto"
)

# Shared text-generation pipeline used by chat_with_model().
# Sampling is enabled with a moderate temperature; replies are capped at
# 512 new tokens.
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=512,
    temperature=0.5,
    do_sample=True
)
24
+
25
# ---------- TECH FILTER ----------
# Keywords that mark a message as technology-related. Single words are
# matched at a word start; multi-word phrases use substring matching.
TECH_KEYWORDS = [
    "python", "java", "javascript", "html", "css", "react", "angular",
    "node", "machine learning", "deep learning", "ai", "api", "code",
    "debug", "error", "technology", "computer", "programming", "software",
    "hardware", "cybersecurity", "database", "sql", "devops", "cloud"
]

def is_tech_query(message: str) -> bool:
    """Return True if *message* appears to be technology-related.

    Fixes the original bare substring test, which produced false
    positives — e.g. "ai" matched inside "said" and "api" inside
    "rapid". Single-word keywords now have to start at a word boundary
    (prefix match, so "debugging" still counts for "debug"), while
    multi-word phrases keep plain substring matching.
    """
    text = message.lower()
    for keyword in TECH_KEYWORDS:
        if " " in keyword:
            # Phrases: substring match is fine (spaces act as anchors).
            if keyword in text:
                return True
        elif re.search(r"\b" + re.escape(keyword), text):
            # Word must *begin* at a boundary; suffixes like "-ging" are OK.
            return True
    return False
34
+
35
# ---------- CHAT FUNCTION ----------
def chat_with_model(message, history):
    """Answer *message* with the LLM, refusing non-tech questions.

    *history* is a list of [user, bot] pairs (Gradio chatbot format);
    the updated history with the new exchange appended is returned.
    """
    # Guard: off-topic questions never reach the model.
    if not is_tech_query(message):
        refusal = "⚠️ I can only answer technology-related queries."
        return history + [[message, refusal]]

    # Replay all prior turns so the model sees the whole conversation.
    turns = [f"User: {u}\nAssistant: {b}\n" for u, b in history]
    prompt = "".join(turns) + f"User: {message}\nAssistant:"

    raw = generator(prompt)[0]["generated_text"]
    # The pipeline echoes the prompt; keep only the final assistant reply.
    _, found, tail = raw.rpartition("Assistant:")
    answer = tail.strip() if found else raw.strip()

    return history + [[message, answer]]
53
+
54
# ---------- LOGIN + UI ----------
# Tracks whether a login has succeeded in this process. NOTE(review):
# this is process-global, not per-browser-session — all visitors share it.
session_state = {"authenticated": False}

# SECURITY: hardcoded plaintext credentials are demo-only. Replace with a
# real credential store (salted password hashes) before any deployment.
_CREDENTIALS = {"admin": "admin123", "techuser": "techpass"}

def login(username, password):
    """Check credentials and toggle the login/chat panels.

    Returns a 3-tuple of (login_group update, chat_group update,
    status markdown). On success the login panel is hidden and the
    chat panel shown; on failure an error message is displayed.
    Uses hmac.compare_digest on encoded bytes for a constant-time
    comparison (avoids timing leaks and non-ASCII TypeErrors).
    """
    expected = _CREDENTIALS.get(username)
    if expected is not None and hmac.compare_digest(
        expected.encode("utf-8"), str(password).encode("utf-8")
    ):
        session_state["authenticated"] = True
        return gr.update(visible=False), gr.update(visible=True), ""
    return gr.update(), gr.update(visible=False), "❌ Invalid credentials."
64
+
65
# Build the two-panel UI: a login form and a (initially hidden) chatbot.
# Visibility of each panel is driven by session_state / the login() handler.
with gr.Blocks(css=".gradio-container {max-width: 750px; margin: auto;}") as demo:
    # Login Page — visible until login() succeeds.
    with gr.Group(visible=not session_state["authenticated"]) as login_group:
        gr.Markdown("# 🔐 Login to Tech Chatbot")
        username = gr.Textbox(label="Username")
        password = gr.Textbox(label="Password", type="password")
        login_btn = gr.Button("Login")
        login_status = gr.Markdown("")  # shows the error text on bad credentials

    # Chatbot Page — hidden until authenticated.
    with gr.Group(visible=session_state["authenticated"]) as chat_group:
        gr.Markdown("# 💻 Tech Helper Chatbot")
        chatbot = gr.Chatbot(height=500)
        msg = gr.Textbox(placeholder="Type your tech question here...", label="Your Message")
        clear = gr.Button("Clear Chat")

        # Enter in the textbox sends the message; Clear resets the history.
        msg.submit(chat_with_model, [msg, chatbot], chatbot)
        clear.click(lambda: None, None, chatbot)

    # Button Logic — login() returns visibility updates for both groups
    # plus the status message.
    login_btn.click(login, [username, password], [login_group, chat_group, login_status])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86
 
87
if __name__ == "__main__":
    # Start the Gradio server (blocking call, default host/port).
    demo.launch()