dumbsonu committed on
Commit
6c6130e
·
1 Parent(s): 65d2bfd

Initial commit

Browse files
Files changed (2) hide show
  1. app.py +406 -61
  2. requirements.txt +0 -0
app.py CHANGED
@@ -1,64 +1,409 @@
1
- import gradio as gr
2
- from huggingface_hub import InferenceClient
3
-
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
- client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
-
9
-
10
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for `message` given the running `history`.

    Yields the accumulated response text after each streamed chunk so the
    Gradio ChatInterface can render it incrementally.
    """
    messages = [{"role": "system", "content": system_message}]

    # Rebuild the conversation from (user, assistant) turn pairs.
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    # Renamed the loop variable: the original reused `message`, shadowing the
    # function parameter.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content

        # Streamed deltas can carry content=None (e.g. role-only chunks);
        # the original unconditional `response += token` raised TypeError then.
        if token:
            response += token
        yield response
41
-
42
-
43
- """
44
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
- """
46
- demo = gr.ChatInterface(
47
- respond,
48
- additional_inputs=[
49
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
50
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
51
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
52
- gr.Slider(
53
- minimum=0.1,
54
- maximum=1.0,
55
- value=0.95,
56
- step=0.05,
57
- label="Top-p (nucleus sampling)",
58
- ),
59
- ],
60
  )
61
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
 
63
- if __name__ == "__main__":
64
- demo.launch()
 
 
1
+ from flask import Flask, request, jsonify, render_template
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
3
+ import os
4
+ os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # Disable GPU for PyTorch
5
+ import torch
6
+ import speech_recognition as sr
7
+ import pyttsx3
8
+ import wikipediaapi
9
+ from googlesearch import search
10
+ import requests
11
+ from newsapi import NewsApiClient
12
+ import pyjokes
13
+ import yfinance as yf
14
+ from diffusers import StableDiffusionPipeline
15
+ from deep_translator import GoogleTranslator
16
+ import sympy as sp
17
+ from forex_python.converter import CurrencyRates
18
+ import re
19
+ from download_models import download_models_from_s3
20
+ from accelerate import init_empty_weights, load_checkpoint_and_dispatch
21
+
22
app = Flask(__name__)

# Offline text-to-speech engine, initialized once at import time.
engine = pyttsx3.init()

# Wikipedia API client; the service requires a descriptive User-Agent that
# includes contact information.
wiki_wiki = wikipediaapi.Wikipedia(
    language="en",
    user_agent="AIChatbot/1.0 (sonu.singh@poczta.fm)"  # Change email to yours
)
30
 
31
# NOTE(review): these keys were previously committed as plain literals and
# should be rotated. Environment variables now take precedence; the literal
# fallbacks keep existing deployments working unchanged.
OPENWEATHER_API_KEY = os.environ.get("OPENWEATHER_API_KEY", "59ba015cbd69ea483e8390970ffff366")

NEWSAPI_KEY = os.environ.get("NEWSAPI_KEY", "46cae6ce089f4b55b748afdaf8fb5ecc")  # Replace with your actual NewsAPI key
newsapi = NewsApiClient(api_key=NEWSAPI_KEY)
35
+
36
# Supported Language Mappings (ISO Codes)
# Lowercase English language names -> codes passed to deep-translator's
# GoogleTranslator (note the one regional variant, "zh-CN").
LANGUAGE_CODES = {
    "afrikaans": "af", "albanian": "sq", "amharic": "am", "arabic": "ar", "armenian": "hy",
    "azerbaijani": "az", "basque": "eu", "belarusian": "be", "bengali": "bn", "bosnian": "bs",
    "bulgarian": "bg", "catalan": "ca", "chinese": "zh-CN", "croatian": "hr", "czech": "cs",
    "danish": "da", "dutch": "nl", "english": "en", "estonian": "et", "finnish": "fi",
    "french": "fr", "german": "de", "greek": "el", "hindi": "hi", "hungarian": "hu",
    "indonesian": "id", "italian": "it", "japanese": "ja", "korean": "ko", "latin": "la",
    "latvian": "lv", "lithuanian": "lt", "malay": "ms", "marathi": "mr", "nepali": "ne",
    "norwegian": "no", "persian": "fa", "polish": "pl", "portuguese": "pt", "romanian": "ro",
    "russian": "ru", "serbian": "sr", "slovak": "sk", "slovenian": "sl", "spanish": "es",
    "swedish": "sv", "tamil": "ta", "telugu": "te", "thai": "th", "turkish": "tr",
    "ukrainian": "uk", "urdu": "ur", "vietnamese": "vi", "welsh": "cy", "xhosa": "xh", "zulu": "zu"
}

# Live FX-rate client (forex-python queries a remote service per conversion).
currency_rates = CurrencyRates()

# Load Sentiment Analysis Model
# Default Hugging Face sentiment pipeline; downloads weights on first use.
sentiment_model = pipeline("sentiment-analysis")
55
+
56
def speak(text):
    """Speak `text` aloud via the module-level pyttsx3 engine (blocks until done)."""
    engine.say(text)
    engine.runAndWait()
59
+
60
def listen():
    """Capture one utterance from the default microphone.

    Returns the Google Speech Recognition transcript, or an error string when
    the audio is unintelligible or the recognition service fails.
    """
    recognizer = sr.Recognizer()
    with sr.Microphone() as source:
        print("🎤 Listening...")
        recognizer.adjust_for_ambient_noise(source)
        audio = recognizer.listen(source)
    try:
        print("🟡 Recognizing...")
        return recognizer.recognize_google(audio)
    # The original bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # catch only the recognizer's documented failure modes.
    except (sr.UnknownValueError, sr.RequestError):
        return "❌ Could not understand audio"
71
+
72
def search_wikipedia(query):
    """Return up to 500 characters of the Wikipedia summary for `query`,
    or a warning string when no article exists.
    """
    article = wiki_wiki.page(query)
    if not article.exists():
        return "⚠️ No Wikipedia article found for that topic."
    return article.summary[:500]  # Limit to 500 characters
77
+
78
+
79
def google_search(query):
    """Run a Google search for `query` and return up to three result URLs,
    newline-separated, or a warning/error string.
    """
    try:
        print(f"🔎 Searching Google for: {query}")  # Debugging
        hits = list(search(query, num_results=3, advanced=True))
        if not hits:
            return "⚠️ No search results found."
        # Convert SearchResult objects to plain text links
        return "\n".join(hit.url for hit in hits)
    except Exception as exc:
        print(f"❌ Google Search Error: {exc}")
        return "⚠️ Error fetching search results."
92
+
93
def get_weather(city):
    """Fetch the current weather for `city` from OpenWeatherMap.

    Returns a human-readable description/temperature string (metric units),
    or a warning string when the lookup fails.
    """
    try:
        city = city.strip().title()  # Format city name properly
        # The previous URL appended ",US" to every query, so non-US cities
        # always failed despite the generic error message. Query the bare city
        # name over HTTPS and bound the request with a timeout so a hung API
        # call cannot stall the request handler indefinitely.
        url = (
            f"https://api.openweathermap.org/data/2.5/weather"
            f"?q={city}&appid={OPENWEATHER_API_KEY}&units=metric"
        )
        response = requests.get(url, timeout=10).json()

        print(f"⛅ Weather API Response: {response}")  # Debugging

        if response.get("cod") != 200:
            return f"⚠️ Couldn't fetch weather data for {city}. Try another city."

        weather_desc = response["weather"][0]["description"]
        temp = response["main"]["temp"]
        return f"The weather in {city} is {weather_desc} with a temperature of {temp}°C."
    except Exception as e:
        print(f"❌ Weather API Error: {e}")
        return "⚠️ Error fetching weather data."
110
+
111
def get_news():
    """Return the top three US English headlines from NewsAPI as a single
    newline-separated string, or a warning/error string.
    """
    try:
        payload = newsapi.get_top_headlines(language='en', country='us')
        titles = [item['title'] for item in payload['articles'][:3]]  # Get top 3 headlines
        if not titles:
            return "⚠️ No news found."
        return "\n".join(titles)
    except Exception as exc:
        print(f"❌ News API Error: {exc}")
        return "⚠️ Error fetching news."
119
+
120
def get_joke():
    """Return a random programming joke from pyjokes."""
    return pyjokes.get_joke()
122
+
123
def get_stock_price(stock_symbol):
    """Look up the latest daily closing price of `stock_symbol` via yfinance.

    Returns a formatted price string, or a warning string for empty/unknown
    symbols or API failures.
    """
    try:
        stock_symbol = stock_symbol.upper().strip()
        if not stock_symbol:
            return "⚠️ Please enter a valid stock symbol (e.g., AAPL, TSLA, MSFT)."

        daily = yf.Ticker(stock_symbol).history(period="1d")
        if daily.empty:  # No rows: unknown or delisted ticker
            return f"⚠️ No stock data found for {stock_symbol}. It may be delisted or incorrect."

        latest_close = daily["Close"].iloc[-1]  # Get the latest closing price
        return f"📈 The current price of {stock_symbol} is **${latest_close:.2f}**."

    except Exception as exc:
        print(f"❌ Stock API Error: {exc}")
        return "⚠️ Error fetching stock price. Please try again later."
141
+
142
def translate_text(user_input):
    """Parse "<text> to <language>" (optionally prefixed with "translate")
    and return the Google translation, or a warning/error string.
    """
    try:
        # Greedy match anchored to the end of the input. The old non-greedy
        # pattern split on the FIRST " to ", so text that itself contained
        # " to " ("I want to go to French") picked the wrong language word.
        match = re.match(r"(?:translate\s+)?(.+)\s+to\s+(\w+)\s*$", user_input, re.IGNORECASE)

        if not match:
            return "⚠️ Please use format: 'Hello to French' or 'Translate Hello to Spanish'."

        text_to_translate, target_language = match.groups()
        target_lang_code = LANGUAGE_CODES.get(target_language.lower())

        if not target_lang_code:
            return "⚠️ Unsupported language. Try using a valid language like 'French', 'Spanish', or 'German'."

        print(f"🔄 Translating '{text_to_translate}' to {target_language} ({target_lang_code})...")
        translator = GoogleTranslator(target=target_lang_code)
        translated_text = translator.translate(text_to_translate.strip())

        return f"🌍 Translated to {target_language.capitalize()}: {translated_text}"

    except Exception as e:
        print(f"❌ Translation Error: {e}")  # Logs error for debugging
        return f"⚠️ Error translating text: {e}"
165
+
166
def generate_code(prompt):
    """Generate Python code for `prompt` using the Phi text-generation pipeline.

    Returns a fenced ```python code block, or a warning string on failure.
    """
    try:
        print(f"🟡 Generating code for: {prompt}")

        response = phi_pipeline(
            f"Write Python code for {prompt}:",
            max_length=75,  # Reduced length
            truncation=True,
        )
        generated_text = response[0]["generated_text"]

        # The old `except IndexError` around the split chain was dead code:
        # str.split always returns at least one element, so [-1]/[0] can never
        # raise. Extract the fenced block when one is present; otherwise fall
        # back to the raw model output (matching the old effective behavior).
        if "```python" in generated_text:
            generated_code = generated_text.split("```python", 1)[1].split("```", 1)[0].strip()
        else:
            generated_code = generated_text.strip()

        return f"```python\n{generated_code}\n```"

    except Exception as e:
        print(f"❌ Code Generation Error: {type(e).__name__}: {e}")
        import traceback
        traceback.print_exc()  # Print the full traceback
        return "⚠️ Error generating code. Check the server logs for details."
191
+
192
def solve_math(user_input):
    """Extract an arithmetic expression from `user_input`, evaluate it with
    sympy, and return "🧮 Answer: <value>" rounded to 2 decimal places,
    or a warning string for missing/invalid expressions.
    """
    try:
        print(f"🟡 Solving: {user_input}")  # Debugging

        # Extract only the math expression (remove words like "solve")
        expression = re.sub(r"[^0-9+\-*/().]", "", user_input)

        if not expression:
            return "⚠️ No valid math expression found. Try: '5 + 3 * 2' or 'Solve 10 / 2'."

        # Evaluate exactly and round only at display time. The previous
        # sp.N(..., 2) rounded to 2 *significant figures* before the ":.2f"
        # formatting, so e.g. "123+456" came back as 580.00 instead of 579.00.
        result = float(sp.sympify(expression))

        return f"🧮 Answer: {result:.2f}"

    except Exception as e:
        print(f"❌ Math Solver Error: {e}")
        return "⚠️ Invalid math expression. Please check the equation."
210
+
211
def convert_currency(amount, from_currency, to_currency):
    """Convert `amount` between two ISO currency codes using live
    forex-python rates; returns a formatted string or a warning on failure.
    """
    try:
        source_code = from_currency.upper()
        target_code = to_currency.upper()
        # Convert currency using real-time exchange rates
        converted = currency_rates.convert(source_code, target_code, float(amount))
        return f"💱 {amount} {source_code} = {converted:.2f} {target_code}"
    except Exception as exc:
        print(f"❌ Currency Conversion Error: {exc}")
        return "⚠️ Invalid currency code or conversion failed."
219
+
220
def analyze_sentiment(text):
    """Classify `text` with the HF sentiment pipeline and return a
    "📊 Sentiment: LABEL (NN.N% confidence)" string, or a warning on failure.
    """
    try:
        prediction = sentiment_model(text)[0]
        label = prediction["label"]  # "POSITIVE" or "NEGATIVE" or "NEUTRAL"
        score = round(prediction["score"] * 100, 2)  # Confidence Score in %
        return f"📊 Sentiment: {label} ({score}% confidence)"
    except Exception as exc:
        print(f"❌ Sentiment Analysis Error: {exc}")
        return "⚠️ Unable to analyze sentiment."
230
+
231
# Load Phi-1.5 model
# Import-time startup: verify local model files, then build tokenizer, model,
# and the `phi_pipeline` used by the routes. Exits the process on failure.
MODEL_PATH = "./models/phi-1.5"

# List of expected model files
MODEL_FILES = [
    "model-00001-of-00002.safetensors",
    "model-00002-of-00002.safetensors",
    "config.json",
    "tokenizer.json",
    "tokenizer_config.json",
    "special_tokens_map.json",
    "added_tokens.json",
    "generation_config.json",
    "merges.txt",
    "model.safetensors.index.json",
    "vocab.json"
]

# Check if all model files exist
if all(os.path.exists(os.path.join(MODEL_PATH, f)) for f in MODEL_FILES):
    print("✅ Models found locally. Skipping download.")
else:
    # NOTE(review): this message points at `download_hf_model.py`, but the file
    # imports `download_models_from_s3` (unused here) — confirm which fetch
    # script is the current one.
    print("❌ Model files missing! Please run `download_hf_model.py` to fetch the model.")
    exit(1)

try:
    print("🟡 Loading tokenizer...")
    tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, local_files_only=True)
    print("✅ Tokenizer loaded!")

    print("🟡 Loading model...")
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_PATH,
        torch_dtype=torch.float32,
        device_map={"": "cpu"}  # Explicitly set to CPU
    )
    # NOTE(review): CUDA_VISIBLE_DEVICES is set to "-1" at the top of the file,
    # so torch.cuda.is_available() is False and this always resolves to "cpu";
    # the "Move to GPU" intent can never fire as written.
    model.to("cuda" if torch.cuda.is_available() else "cpu")  # Move to GPU if available
    print("✅ Model loaded successfully!")

    print("🟡 Creating pipeline...")
    phi_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
    print("✅ Pipeline created!")

except Exception as e:
    print(f"❌ Error loading model: {type(e).__name__}: {e}")
    import traceback
    traceback.print_exc()
    exit(1)
279
+
280
# Use Stability AI's Stable Diffusion Model
MODEL_NAME = "runwayml/stable-diffusion-v1-5"

def generate_image(prompt):
    """Render `prompt` with Stable Diffusion v1.5 on CPU and save the result
    to static/generated_image.png.

    Returns a status string (success message with the image path, or a
    warning on failure).
    """
    try:
        # The pipeline used to be re-loaded from scratch on EVERY request,
        # which is extremely slow and memory-churning; cache it on the
        # function object after the first successful load.
        pipe = getattr(generate_image, "_pipe", None)
        if pipe is None:
            print("🟡 Loading Stable Diffusion pipeline...")
            # Enforce CPU usage; float32 because half precision is for GPUs.
            pipe = StableDiffusionPipeline.from_pretrained(
                MODEL_NAME,  # previously a duplicated string literal
                torch_dtype=torch.float32
            ).to("cpu")
            generate_image._pipe = pipe
            print("✅ Model loaded on CPU")

        # Generate image with lower resolution (reduce RAM usage)
        image = pipe(prompt, height=512, width=512).images[0]

        # Save the image
        image_path = "static/generated_image.png"
        image.save(image_path)

        print("✅ Image generated successfully!")
        return f"🌆 Image generated successfully! View at: {image_path}"

    except Exception as e:
        print(f"❌ Image Generation Error: {e}")
        import traceback
        traceback.print_exc()
        return "⚠️ Error generating image. Try again later."
314
+
315
+
316
+ @app.route("/")
317
+ def home():
318
+ return render_template("index.html")
319
+
320
@app.route('/voice', methods=['POST'])
def voice_chat():
    """Voice endpoint: transcribe microphone input, run it through the Phi
    pipeline, speak the reply aloud, and return it as JSON.

    Returns {"response": ...} on success, or {"error": ...} with HTTP 500.
    """
    try:
        transcript = listen()
        print(f"📥 Voice Input: {transcript}")

        generated = phi_pipeline(transcript, max_length=100, truncation=True)
        reply = generated[0]["generated_text"]
        print(f"📤 AI Response: {reply}")

        speak(reply)  # AI reads response aloud
        return jsonify({"response": reply})
    except Exception as exc:
        return jsonify({"error": str(exc)}), 500
334
+
335
@app.route('/chat', methods=['POST'])
def chat_with_phi1_5():
    """Main chat endpoint: dispatch the message to the matching skill
    (news, Wikipedia, Google, weather, joke, stock, image, translate, code,
    math, currency, sentiment) or fall through to the Phi model.

    Expects JSON {"message": "..."}; returns {"response": "..."} on success
    or {"error": "..."} with HTTP 500.
    """
    try:
        data = request.get_json()
        user_input = data["message"].strip()
        print(f"📥 Received input: {user_input}")

        # Keyword routing: first match wins, so branch order matters.
        # News Updates
        if "news" in user_input.lower():
            response_text = get_news()
        # Wikipedia Search (case-sensitive trigger, matching original behavior)
        elif "Wikipedia" in user_input:
            response_text = search_wikipedia(user_input.replace("Wikipedia", "").strip())
        # Google Search
        elif "Google" in user_input:
            response_text = google_search(user_input.replace("Google", "").strip())
        # Weather Update
        elif "weather" in user_input.lower():
            response_text = get_weather(user_input.replace("weather", "").strip())
        # Jokes
        elif "joke" in user_input.lower():
            response_text = get_joke()
        # Stock Prices
        elif "stock" in user_input.lower():
            stock_symbol = user_input.replace("stock", "").strip().upper()
            response_text = get_stock_price(stock_symbol)
        # AI Image Generation
        elif "generate image" in user_input.lower():
            prompt = user_input.replace("generate image", "").strip()
            response_text = generate_image(prompt)
        # AI Language Translation
        elif "translate" in user_input.lower():
            response_text = translate_text(user_input)
        # AI Code Generation
        elif "generate code" in user_input.lower():
            response_text = generate_code(user_input.replace("generate code", "").strip())
        # AI Bug Fixing
        elif "fix code" in user_input.lower():
            # BUG FIX: this branch previously called the undefined name
            # `phi_ai`, raising NameError (surfaced as HTTP 500) on every
            # "fix code" request; the pipeline is `phi_pipeline`.
            response = phi_pipeline("Fix this code: " + user_input.replace("fix code", "").strip(), max_length=150, truncation=True)
            response_text = f"🔹 Fixed Code:\n```python\n{response[0]['generated_text']}\n```"
        # AI Math Solver
        elif "solve" in user_input.lower() or "=" in user_input:
            response_text = solve_math(user_input)
        # Currency Conversion
        elif "convert" in user_input and "to" in user_input:
            try:
                parts = user_input.replace("convert", "").strip().split(" to ")
                amount_currency = parts[0].split()
                amount = amount_currency[0]
                from_currency = amount_currency[1]
                to_currency = parts[1]
                response_text = convert_currency(amount, from_currency, to_currency)
            # Narrowed from a bare except: only malformed input can land here.
            except (IndexError, ValueError):
                response_text = "⚠️ Please use format: 'Convert 100 USD to EUR'."
        # Sentiment Analysis
        elif "analyze sentiment" in user_input.lower():
            response_text = analyze_sentiment(user_input.replace("analyze sentiment", "").strip())
        # Default AI Response
        else:
            response = phi_pipeline(user_input, max_length=100, truncation=True)
            response_text = response[0]["generated_text"]

        print(f"📤 AI Response: {response_text}")
        return jsonify({"response": response_text})

    except Exception as e:
        print(f"❌ Chat Error: {type(e).__name__}: {e}")  # Improved error handling
        import traceback
        traceback.print_exc()  # Print traceback
        return jsonify({"error": str(e)}), 500
405
+
406
 
407
if __name__ == '__main__':
    print("🌍 Running Flask on http://0.0.0.0:5000")
    # SECURITY: debug=True while binding 0.0.0.0 exposes the Werkzeug
    # interactive debugger (arbitrary code execution) to the network.
    # Debug mode must now be opted into explicitly via FLASK_DEBUG=1.
    app.run(host="0.0.0.0", port=5000, debug=os.environ.get("FLASK_DEBUG") == "1")
requirements.txt CHANGED
Binary files a/requirements.txt and b/requirements.txt differ