#!/usr/bin/env python3
"""
Flask app (served with Gunicorn) for the AEGIS Economics chat application,
backed by a Qwen2 causal language model.
"""

from flask import Flask, request, jsonify, render_template_string
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import logging

# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = Flask(__name__)

# Global variables for model and tokenizer
model = None
tokenizer = None

# HTML template for the chat interface.
# NOTE: the original markup, styles, and scripts were lost in transit; this is
# a minimal reconstruction that preserves the visible text and wires the page
# to the /chat and /health endpoints defined below.
HTML_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>AEGIS Economics AI</title>
</head>
<body>
    <h1>🏛️ AEGIS Economics AI</h1>
    <p>Advanced Economic Analysis &amp; Policy Insights</p>
    <div id="messages">
        <p>Hello! I'm AEGIS Economics AI. Ask me about economic policies, market analysis, or financial strategies.</p>
    </div>
    <p id="status">Checking model status...</p>
    <form id="chat-form">
        <input id="user-input" autocomplete="off" placeholder="Type a message...">
        <button type="submit">Send</button>
    </form>
    <script>
        const messagesEl = document.getElementById('messages');
        const statusEl = document.getElementById('status');

        // Reflect model status once the page loads.
        fetch('/health').then(r => r.json()).then(d => {
            statusEl.textContent = d.model_loaded ? 'Model loaded and ready.' : 'Model not loaded yet.';
        }).catch(() => { statusEl.textContent = 'Status check failed.'; });

        // Post the user's message to /chat and append the reply.
        document.getElementById('chat-form').addEventListener('submit', async e => {
            e.preventDefault();
            const input = document.getElementById('user-input');
            const text = input.value.trim();
            if (!text) return;
            input.value = '';
            append('You: ' + text);
            const resp = await fetch('/chat', {
                method: 'POST',
                headers: {'Content-Type': 'application/json'},
                body: JSON.stringify({message: text})
            });
            const data = await resp.json();
            append('AEGIS: ' + (data.response || data.error || 'No response.'));
        });

        function append(text) {
            const p = document.createElement('p');
            p.textContent = text;
            messagesEl.appendChild(p);
        }
    </script>
</body>
</html>
"""
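
# Runtime dependencies (assumption: no requirements file accompanies this
# script). flask, torch, and transformers are imported above, and gunicorn is
# implied by the module docstring, e.g.
#
#     pip install flask gunicorn torch transformers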
""" def load_model(): """Load the Qwen2 model and tokenizer from HF repository""" global model, tokenizer try: logger.info("Loading model and tokenizer from Hugging Face...") # Load from the deployed model repository model_repo = "Gaston895/Aegisecon1" logger.info(f"Loading tokenizer from {model_repo}...") tokenizer = AutoTokenizer.from_pretrained( model_repo, trust_remote_code=True, use_auth_token=False ) logger.info(f"Loading model from {model_repo}...") model = AutoModelForCausalLM.from_pretrained( model_repo, torch_dtype=torch.float16, # Changed from bfloat16 for better compatibility device_map="cpu", # Force CPU for HF Spaces compatibility trust_remote_code=True, use_auth_token=False, low_cpu_mem_usage=True ) logger.info("Model loaded successfully from HF repository!") return True except Exception as e: logger.error(f"Error loading model from HF: {str(e)}") # Try alternative loading method try: logger.info("Trying alternative loading method...") tokenizer = AutoTokenizer.from_pretrained( "Qwen/Qwen2-1.5B", # Fallback to base model trust_remote_code=True ) model = AutoModelForCausalLM.from_pretrained( "Qwen/Qwen2-1.5B", torch_dtype=torch.float16, device_map="cpu", trust_remote_code=True, low_cpu_mem_usage=True ) logger.info("Fallback model loaded successfully!") return True except Exception as e2: logger.error(f"Fallback loading also failed: {str(e2)}") return False def generate_response(prompt): """Generate response using the loaded model""" try: if model is None or tokenizer is None: return "Model is still loading, please wait a moment and try again..." # Economics-focused system prompt system_prompt = """You are AEGIS Economics AI, an expert economic analyst and policy advisor. Provide clear, accurate, and insightful responses about economics, finance, markets, and policy. Focus on practical analysis and actionable insights.""" full_prompt = f"{system_prompt}\n\nUser: {prompt}\nAssistant:" # Tokenize input inputs = tokenizer(full_prompt, return_tensors="pt", truncation=True, max_length=1024) # Generate response with torch.no_grad(): outputs = model.generate( inputs.input_ids, max_new_tokens=256, # Reduced for faster generation temperature=0.7, do_sample=True, pad_token_id=tokenizer.eos_token_id, repetition_penalty=1.1, no_repeat_ngram_size=3 ) # Decode response response = tokenizer.decode(outputs[0], skip_special_tokens=True) # Extract only the assistant's response if "Assistant:" in response: response = response.split("Assistant:")[-1].strip() return response except Exception as e: logger.error(f"Error generating response: {str(e)}") return "I apologize, but I'm having trouble processing your request right now. Please try again in a moment." 
@app.route('/')
def home():
    """Serve the main chat interface."""
    return render_template_string(HTML_TEMPLATE)


@app.route('/chat', methods=['POST'])
def chat():
    """Handle chat messages."""
    try:
        data = request.get_json()
        user_message = data.get('message', '') if data else ''

        if not user_message:
            return jsonify({'error': 'No message provided'}), 400

        # Generate AI response
        ai_response = generate_response(user_message)
        return jsonify({'response': ai_response})
    except Exception as e:
        logger.error(f"Error in chat endpoint: {str(e)}")
        return jsonify({'error': 'Internal server error'}), 500


@app.route('/health')
def health():
    """Health check endpoint."""
    return jsonify({
        'status': 'healthy',
        'model_loaded': model is not None,
        'tokenizer_loaded': tokenizer is not None,
        'model_info': 'Gaston895/Aegisecon1' if model is not None else 'Not loaded',
    })


@app.route('/load_model', methods=['POST'])
def load_model_endpoint():
    """Endpoint to trigger model loading."""
    try:
        success = load_model()
        return jsonify({
            'success': success,
            'model_loaded': model is not None,
            'tokenizer_loaded': tokenizer is not None,
        })
    except Exception as e:
        return jsonify({'error': str(e)}), 500


if __name__ == '__main__':
    logger.info("Starting AEGIS Economics AI...")

    # Try to load the model on startup, but don't fail if it doesn't work.
    logger.info("Attempting to load model...")
    model_loaded = load_model()

    if model_loaded:
        logger.info("Model loaded successfully, starting server...")
    else:
        logger.warning("Model failed to load, starting server anyway. "
                       "Model can be loaded via the /load_model endpoint.")

    app.run(host='0.0.0.0', port=7860, debug=False)
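
# Deployment sketch (assumption: the module is saved as app.py). Under
# Gunicorn the __main__ block above never runs, so the model is not loaded at
# startup; trigger it via POST /load_model, or call load_model() at import
# time instead. A typical invocation:
#
#     gunicorn --workers 1 --timeout 300 --bind 0.0.0.0:7860 app:app
#
# A single worker is used because each worker process would hold its own copy
# of the model in memory.
#
# Quick smoke test of the endpoints:
#
#     curl http://localhost:7860/health
#     curl -X POST http://localhost:7860/load_model
#     curl -X POST http://localhost:7860/chat \
#          -H "Content-Type: application/json" \
#          -d '{"message": "How do interest rate hikes affect inflation?"}'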