# Hugging Face Spaces status banner ("Spaces: Sleeping") was captured along
# with this file; kept here as a comment so the module remains importable.
| import contextlib | |
| from flask import Flask, request, jsonify | |
| from flask_cors import CORS | |
| from scipy import io | |
| import torch | |
| # Import the specific functions from your ai_thingy script | |
| # Make sure your AI script is named ai_thingy.py | |
| from ai_thingy import ( | |
| initialize_or_retrain, | |
| generate_text, | |
| current_model, | |
| current_tokenizer, | |
| device, | |
| MAX_SEQ_LENGTH | |
| ) | |
| import ai_thingy | |
# Flask application object; CORS is enabled so the browser front-end
# (served from a different origin) can call this API.
app = Flask(__name__)
CORS(app)  # This allows your JS to talk to this Python server

# Initialize the model once when the server starts.  initial_train=True with
# epochs=10 triggers a (re)training pass at startup — NOTE(review): this runs
# at import time, so every import of this module retrains; confirm intended.
print("[Bridge] Waking up the Aoban Brain...")
initialize_or_retrain(initial_train=True, epochs=10)
# FIX: the handler was never registered with Flask — without @app.route the
# endpoint is unreachable and the JS front-end gets a 404.  The path "/ask"
# is inferred from the function name; adjust if the client calls another URL.
@app.route("/ask", methods=["POST"])
def ask():
    """Handle one generation request from the front-end.

    Expects a JSON body of the form ``{"input": "<user prompt>"}``.

    Returns:
        JSON ``{"response": <generated text>, "logs": <status>}`` on success,
        or the same shape with the error text and HTTP 500 on failure.
    """
    try:
        # get_json(silent=True) returns None instead of raising when the body
        # is missing or not valid JSON, so an empty request yields "" cleanly.
        data = request.get_json(silent=True) or {}
        user_prompt = data.get("input", "")
        # Access attributes through the ai_thingy module (rather than the
        # names imported at the top of the file) so a model that was
        # retrained/reloaded after import is picked up.
        response = ai_thingy.generate_text(
            ai_thingy.current_model,
            ai_thingy.current_tokenizer,
            user_prompt,
            ai_thingy.MAX_SEQ_LENGTH,
            ai_thingy.device,
            top_k=3,
            penalty=3.0,
            temperature=1.0,
        )
        return jsonify({"response": response, "logs": "Generation successful."})
    except Exception as e:
        # Top-level boundary: log and report the failure to the client
        # instead of letting Flask return an opaque 500 page.
        print(f"[CRASH] Aoban Brain Error: {e}")
        return jsonify({"response": f"Brain Error: {str(e)}", "logs": str(e)}), 500
if __name__ == "__main__":
    # Hosting platforms inject the listen port through the PORT environment
    # variable; 7860 is the Hugging Face Spaces default.
    import os

    listen_port = int(os.environ.get("PORT", 7860))
    # Bind on all interfaces so the container's port mapping reaches us.
    app.run(host="0.0.0.0", port=listen_port)