Hadiil committed on
Commit
cb3aa77
·
verified ·
1 Parent(s): 3e341ae

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +73 -26
app.py CHANGED
@@ -63,9 +63,17 @@ app.mount("/images", StaticFiles(directory="images"), name="images")
63
 
64
  # Gemini API Configuration
65
  API_KEY = os.getenv('GEMINI_API_KEY')
 
66
  if not API_KEY:
67
- raise ValueError("GEMINI_API_KEY environment variable is not set")
68
- genai.configure(api_key=API_KEY)
 
 
 
 
 
 
 
69
 
70
  # Model configurations
71
  MODELS = {
@@ -118,6 +126,9 @@ def load_model(task: str, model_name: str = None):
118
  model_to_load = model_name or MODELS.get(task)
119
 
120
  if task == "chatbot":
 
 
 
121
  return genai.GenerativeModel(model_to_load)
122
 
123
  if task == "visual-qa":
@@ -156,10 +167,14 @@ def load_model(task: str, model_name: str = None):
156
 
157
  def get_gemini_response(user_input: str, is_generation: bool = False):
158
  """Function to generate response with Gemini for both chat and text generation"""
 
 
159
  if not user_input:
160
  return "Please provide some input."
161
  try:
162
  chatbot = load_model("chatbot")
 
 
163
  if is_generation:
164
  prompt = f"Generate creative text based on this prompt: {user_input}"
165
  else:
@@ -470,9 +485,12 @@ async def process_input(
470
  final_summary = re.sub(r'\s+', ' ', final_summary).strip()
471
  if not final_summary or final_summary.lower().startswith(content.lower()[:30]):
472
  logger.warning("Summarizer produced inadequate output, falling back to Gemini")
473
- final_summary = get_gemini_response(
474
- f"Summarize this text in a concise and meaningful way: {content}"
475
- )
 
 
 
476
 
477
  if not final_summary.endswith(('.', '!', '?')):
478
  final_summary += '.'
@@ -482,9 +500,12 @@ async def process_input(
482
 
483
  except Exception as e:
484
  logger.error(f"Summarization error: {str(e)}")
485
- final_summary = get_gemini_response(
486
- f"Summarize this text in a concise and meaningful way: {content}"
487
- )
 
 
 
488
  return {"response": final_summary, "type": "summary", "message": "Text was preprocessed to correct spelling errors"}
489
 
490
  elif intent == "image-to-text":
@@ -537,11 +558,15 @@ async def process_input(
537
  if is_factual:
538
  final_answer = answer
539
  else:
540
- chatbot = load_model("chatbot")
541
- if "fly" in question.lower():
542
- final_answer = chatbot.generate_content(f"Make this fun and spacey: {answer}").text.strip()
 
 
 
543
  else:
544
- final_answer = chatbot.generate_content(f"Make this cosmic and poetic: {answer}").text.strip()
 
545
 
546
  logger.info(f"Final VQA answer: {final_answer}")
547
 
@@ -572,9 +597,13 @@ async def process_input(
572
  return {"response": response, "type": "visualization_code"}
573
 
574
  elif intent == "text-generation":
575
- response = get_gemini_response(text, is_generation=True)
576
- lines = response.split(". ")
577
- formatted_poem = "\n".join(line.strip() + ("." if not line.endswith(".") else "") for line in lines if line)
 
 
 
 
578
  return {"response": formatted_poem, "type": "generated_text"}
579
 
580
  elif intent == "file-qa":
@@ -590,6 +619,15 @@ async def process_input(
590
  qa_pipeline = load_model("file-qa")
591
  if qa_pipeline is None:
592
  logger.info("Using Gemini fallback for file-qa")
 
 
 
 
 
 
 
 
 
593
  question = text.strip()
594
  if not question.endswith('?'):
595
  question += '?'
@@ -627,8 +665,12 @@ async def process_input(
627
  best_answer += '.'
628
 
629
  try:
630
- chatbot = load_model("chatbot")
631
- final_answer = chatbot.generate_content(f"Make this cosmic and poetic: {best_answer}").text.strip()
 
 
 
 
632
  except Exception as e:
633
  logger.warning(f"Failed to add cosmic tone: {str(e)}. Using raw answer.")
634
  final_answer = best_answer
@@ -816,12 +858,19 @@ async def home():
816
  @app.get("/health")
817
  async def health_check():
818
  """Health check endpoint"""
819
- return {"status": "healthy", "version": "2.0.0"}
 
 
 
 
820
 
821
  @app.get("/models")
822
  async def list_models():
823
  """List available models"""
824
- return {"models": MODELS}
 
 
 
825
 
826
  @app.on_event("startup")
827
  async def startup_event():
@@ -848,13 +897,11 @@ async def startup_event():
848
  except Exception as e:
849
  logger.error(f"Error pre-loading translation model: {str(e)}")
850
 
851
- await asyncio.gather(
852
- load_model_with_timeout("summarization"),
853
- load_model_with_timeout("image-to-text"),
854
- load_model_with_timeout("visual-qa"),
855
- load_model_with_timeout("chatbot"),
856
- load_model_with_timeout("file-qa")
857
- )
858
 
859
  if __name__ == "__main__":
860
  import uvicorn
 
63
 
64
  # Gemini API Configuration
65
  API_KEY = os.getenv('GEMINI_API_KEY')
66
+ GEMINI_AVAILABLE = True
67
  if not API_KEY:
68
+ logger.warning("GEMINI_API_KEY not set. Gemini-dependent features (chatbot, file QA fallback, summarization fallback, text generation) will be disabled.")
69
+ GEMINI_AVAILABLE = False
70
+ else:
71
+ try:
72
+ genai.configure(api_key=API_KEY)
73
+ logger.info("Gemini API configured successfully")
74
+ except Exception as e:
75
+ logger.error(f"Failed to configure Gemini API: {str(e)}. Disabling Gemini features.")
76
+ GEMINI_AVAILABLE = False
77
 
78
  # Model configurations
79
  MODELS = {
 
126
  model_to_load = model_name or MODELS.get(task)
127
 
128
  if task == "chatbot":
129
+ if not GEMINI_AVAILABLE:
130
+ logger.warning("Gemini not available. Returning None for chatbot task.")
131
+ return None
132
  return genai.GenerativeModel(model_to_load)
133
 
134
  if task == "visual-qa":
 
167
 
168
  def get_gemini_response(user_input: str, is_generation: bool = False):
169
  """Function to generate response with Gemini for both chat and text generation"""
170
+ if not GEMINI_AVAILABLE:
171
+ return "Error: Gemini API is not available. Please contact the administrator."
172
  if not user_input:
173
  return "Please provide some input."
174
  try:
175
  chatbot = load_model("chatbot")
176
+ if not chatbot:
177
+ return "Error: Gemini API is not available."
178
  if is_generation:
179
  prompt = f"Generate creative text based on this prompt: {user_input}"
180
  else:
 
485
  final_summary = re.sub(r'\s+', ' ', final_summary).strip()
486
  if not final_summary or final_summary.lower().startswith(content.lower()[:30]):
487
  logger.warning("Summarizer produced inadequate output, falling back to Gemini")
488
+ if GEMINI_AVAILABLE:
489
+ final_summary = get_gemini_response(
490
+ f"Summarize this text in a concise and meaningful way: {content}"
491
+ )
492
+ else:
493
+ final_summary = "Summarization fallback unavailable without Gemini API."
494
 
495
  if not final_summary.endswith(('.', '!', '?')):
496
  final_summary += '.'
 
500
 
501
  except Exception as e:
502
  logger.error(f"Summarization error: {str(e)}")
503
+ if GEMINI_AVAILABLE:
504
+ final_summary = get_gemini_response(
505
+ f"Summarize this text in a concise and meaningful way: {content}"
506
+ )
507
+ else:
508
+ final_summary = "Summarization failed and Gemini fallback is unavailable."
509
  return {"response": final_summary, "type": "summary", "message": "Text was preprocessed to correct spelling errors"}
510
 
511
  elif intent == "image-to-text":
 
558
  if is_factual:
559
  final_answer = answer
560
  else:
561
+ if GEMINI_AVAILABLE:
562
+ chatbot = load_model("chatbot")
563
+ if "fly" in question.lower():
564
+ final_answer = chatbot.generate_content(f"Make this fun and spacey: {answer}").text.strip()
565
+ else:
566
+ final_answer = chatbot.generate_content(f"Make this cosmic and poetic: {answer}").text.strip()
567
  else:
568
+ final_answer = answer
569
+ logger.warning("Gemini unavailable for enhancing VQA answer")
570
 
571
  logger.info(f"Final VQA answer: {final_answer}")
572
 
 
597
  return {"response": response, "type": "visualization_code"}
598
 
599
  elif intent == "text-generation":
600
+ if GEMINI_AVAILABLE:
601
+ response = get_gemini_response(text, is_generation=True)
602
+ lines = response.split(". ")
603
+ formatted_poem = "\n".join(line.strip() + ("." if not line.endswith(".") else "") for line in lines if line)
604
+ else:
605
+ response = "Text generation is unavailable without Gemini API."
606
+ formatted_poem = response
607
  return {"response": formatted_poem, "type": "generated_text"}
608
 
609
  elif intent == "file-qa":
 
619
  qa_pipeline = load_model("file-qa")
620
  if qa_pipeline is None:
621
  logger.info("Using Gemini fallback for file-qa")
622
+ if not GEMINI_AVAILABLE:
623
+ return {
624
+ "response": "File QA is unavailable without Gemini API or a working QA model.",
625
+ "type": "file_qa",
626
+ "additional_data": {
627
+ "question": text,
628
+ "file_name": file.filename
629
+ }
630
+ }
631
  question = text.strip()
632
  if not question.endswith('?'):
633
  question += '?'
 
665
  best_answer += '.'
666
 
667
  try:
668
+ if GEMINI_AVAILABLE:
669
+ chatbot = load_model("chatbot")
670
+ final_answer = chatbot.generate_content(f"Make this cosmic and poetic: {best_answer}").text.strip()
671
+ else:
672
+ final_answer = best_answer
673
+ logger.warning("Gemini unavailable for enhancing file QA answer")
674
  except Exception as e:
675
  logger.warning(f"Failed to add cosmic tone: {str(e)}. Using raw answer.")
676
  final_answer = best_answer
 
858
  @app.get("/health")
859
  async def health_check():
860
  """Health check endpoint"""
861
+ return {
862
+ "status": "healthy",
863
+ "version": "2.0.0",
864
+ "gemini_available": GEMINI_AVAILABLE
865
+ }
866
 
867
  @app.get("/models")
868
  async def list_models():
869
  """List available models"""
870
+ available_models = MODELS.copy()
871
+ if not GEMINI_AVAILABLE:
872
+ available_models["chatbot"] = "disabled (Gemini API unavailable)"
873
+ return {"models": available_models}
874
 
875
  @app.on_event("startup")
876
  async def startup_event():
 
897
  except Exception as e:
898
  logger.error(f"Error pre-loading translation model: {str(e)}")
899
 
900
+ tasks = ["summarization", "image-to-text", "visual-qa", "file-qa"]
901
+ if GEMINI_AVAILABLE:
902
+ tasks.append("chatbot")
903
+
904
+ await asyncio.gather(*(load_model_with_timeout(task) for task in tasks))
 
 
905
 
906
  if __name__ == "__main__":
907
  import uvicorn