# Aoban Brain bridge: a small Flask server that exposes the ai_thingy
# model over HTTP so a JavaScript front-end can query it.
import contextlib
from flask import Flask, request, jsonify
from flask_cors import CORS
from scipy import io
import torch

# Import the module object itself: ai_thingy rebinds current_model /
# current_tokenizer when it (re)trains, so reading them as ai_thingy.<name>
# always sees the live objects, whereas the from-imports below would go
# stale after a retrain. The from-imports are kept for compatibility with
# any code that references these names directly.
import ai_thingy
from ai_thingy import (
    initialize_or_retrain,
    generate_text,
    current_model,
    current_tokenizer,
    device,
    MAX_SEQ_LENGTH,
)

app = Flask(__name__)
CORS(app)  # Allow the browser front-end (different origin) to call this server.

# Train or load the model exactly once, at server start-up.
print("[Bridge] Waking up the Aoban Brain...")
initialize_or_retrain(initial_train=True, epochs=10)
@app.route('/ask', methods=['POST'])
def ask():
    """Generate a model response for the prompt in the POST body.

    Expects a JSON body with an ``"input"`` key (missing/invalid JSON is
    treated as an empty prompt). Returns JSON ``{"response", "logs"}``;
    on any generation failure the error text is returned with HTTP 500.
    """
    try:
        # silent=True: a missing/garbled JSON body yields None instead of
        # raising, so we degrade to an empty prompt rather than a 500.
        data = request.get_json(silent=True) or {}
        user_prompt = data.get("input", "")
        # Read the model/tokenizer through the module so we pick up
        # whatever ai_thingy currently has bound (it may retrain and
        # rebind them after start-up).
        response = ai_thingy.generate_text(
            ai_thingy.current_model,
            ai_thingy.current_tokenizer,
            user_prompt,
            ai_thingy.MAX_SEQ_LENGTH,
            ai_thingy.device,
            top_k=3,
            penalty=3.0,      # explicit repetition penalty (not the script default)
            temperature=1.0,  # explicit temperature (not the script default)
        )
        return jsonify({"response": response, "logs": "Generation successful."})
    except Exception as e:
        # Top-level boundary: surface the error to the client instead of
        # letting Flask emit an opaque 500 page.
        print(f"[CRASH] Aoban Brain Error: {e}")
        return jsonify({"response": f"Brain Error: {str(e)}", "logs": str(e)}), 500
if __name__ == "__main__":
    import os

    # The hosting platform announces the port via $PORT; default to 7860,
    # the Hugging Face Spaces convention.
    port = int(os.environ.get("PORT", 7860))
    # Bind all interfaces so the container's mapped port is reachable.
    app.run(host="0.0.0.0", port=port)