import json
import os
import re

from dotenv import load_dotenv
from g4f.client import Client

load_dotenv()

class Model:
    def __init__(self):
        self.gemini_api_key = os.getenv("GEMINI_API_KEY")
        self.gemini_model = os.getenv("GEMINI_MODEL")
        # genai client initialization removed since it is not properly
        # imported; all calls route through the fallback model instead.
    def fall_back_llm(self, prompt):
        """Fallback method using gpt-4o-mini when Gemini fails."""
        try:
            response = Client().chat.completions.create(
                model="gpt-4o-mini",
                messages=[{"role": "user", "content": prompt}],
                web_search=False,
            )
            return response.choices[0].message.content
        except Exception as e:
            return f"Both primary and fallback models failed. Error: {str(e)}"

    def send_message_openrouter(self, prompt):
        # genai client is not available, so use the fallback model directly.
        return self.fall_back_llm(prompt)

    def llm(self, prompt, query):
        # genai client is not available, so use the fallback model directly.
        combined_content = f"{prompt}\n\n{query}"
        return self.fall_back_llm(combined_content)
    def llm_image(self, text, image):
        # Image processing requires the genai client, which is not configured.
        print("Error in llm_image: genai client not available")
        return "Error: Image processing not available - genai client not configured"
    def clean_json_response(self, response_text):
        """Extract the JSON array from the model's response and strip trailing commas."""
        start = response_text.find('[')
        end = response_text.rfind(']')
        # rfind returns -1 when ']' is absent; compare against start instead
        # of adding 1 first, which made the original `end != -1` check dead code.
        if start != -1 and end > start:
            # Drop trailing commas before ']' that json.loads rejects,
            # e.g. '[1, 2,]' -> '[1, 2]'.
            return re.sub(r",\s*]", "]", response_text[start:end + 1])
        return response_text
    def skinScheduler(self, prompt, max_retries=3):
        """Generate a skincare schedule, retrying until the model returns valid JSON."""
        last_error = None
        for _ in range(max_retries):
            try:
                fallback_response = self.fall_back_llm(prompt)
                cleaned_fallback = self.clean_json_response(fallback_response)
                return json.loads(cleaned_fallback)
            except json.JSONDecodeError as e:
                # Malformed JSON: retry with a fresh completion.
                last_error = e
            except Exception as e:
                return {"error": f"Model failed: {str(e)}"}
        return {"error": f"Failed to produce valid JSON after {max_retries} attempts: {last_error}"}