Update app.py
app.py
CHANGED
@@ -1,4 +1,5 @@
-from flask import Flask, request, jsonify
+import os
+from flask import Flask, request, jsonify, render_template
 from flask_cors import CORS
 from brain import MairaBrain
 
@@ -6,24 +7,18 @@ app = Flask(__name__)
 CORS(app)
 
 # --- Configuration for the 5 Neural Cores ---
-
-# 1. Maira Lite (Original Small)
 REPO_SMALL = "CyberCoder225/maira-model"
 FILE_SMALL = "SmolLM2-360M-Instruct.Q4_K_M.gguf"
 
-# 2. Maira Prime (Llama 3.2 1B)
 REPO_PRIME = "bartowski/Llama-3.2-1B-Instruct-GGUF"
 FILE_PRIME = "Llama-3.2-1B-Instruct-Q4_K_M.gguf"
 
-# 3. Maira Logic (Qwen 2.5 1.5B)
 REPO_LOGIC = "Qwen/Qwen2.5-1.5B-Instruct-GGUF"
 FILE_LOGIC = "qwen2.5-1.5b-instruct-q4_k_m.gguf"
 
-# 4. Maira Chat (Danube 3 500M) - FIXED FILENAME
 REPO_CHAT = "h2oai/h2o-danube3-500m-chat-GGUF"
 FILE_CHAT = "h2o-danube3-500m-chat-Q4_K_M.gguf"
 
-# 5. Maira Art (Granite 3.0 2B) - SWAPPED TO OPEN MODEL
 REPO_ART = "bartowski/granite-3.0-2b-instruct-GGUF"
 FILE_ART = "granite-3.0-2b-instruct-Q4_K_M.gguf"
 
@@ -38,22 +33,26 @@ print("✅ All Cores Online. Creator: CyberCoder225")
 
 @app.route('/', methods=['GET'])
 def home():
-
-
-
-
-
-
+    # This serves your index.html from the /templates folder
+    try:
+        return render_template('index.html')
+    except:
+        # Fallback if you haven't created the /templates folder yet
+        return jsonify({
+            "status": "online",
+            "message": "HTML not found. Use /chat API or add index.html to /templates folder.",
+            "owner": "CyberCoder225"
+        })
 
 @app.route('/chat', methods=['POST'])
 def chat():
     try:
         data = request.json
-        user_id = data.get("user_id", "
+        user_id = data.get("user_id", "CyberCoder225")
         user_input = data.get("message", "")
         model_type = data.get("model_type", "small")
 
-        # Routing Logic
+        # Routing Logic
         if model_type == "medium":
             answer = maira_prime.get_response(user_id, user_input)
             model_used = "maira-prime"
@@ -63,19 +62,20 @@ def chat():
         elif model_type == "danube":
             answer = maira_chat.get_response(user_id, user_input)
             model_used = "maira-chat"
-        elif model_type == "granite":
+        elif model_type == "granite":
            answer = maira_art.get_response(user_id, user_input)
            model_used = "maira-art"
         else:
             answer = maira_lite.get_response(user_id, user_input)
             model_used = "maira-lite"
-
+
         return jsonify({
             "maira": answer,
             "metadata": {"model": model_used, "creator": "CyberCoder225"}
         })
     except Exception as e:
-
+        print(f"❌ Error: {str(e)}")
+        return jsonify({"maira": f"System error, boss: {str(e)}"}), 500
 
 if __name__ == "__main__":
     app.run(host="0.0.0.0", port=7860)
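For reference, here is one way to exercise the updated endpoints once the Space is running. This is a minimal sketch using the requests library; the base URL is a placeholder for your own Space, and the model_type values shown are the ones visible in this diff ("medium", "danube", "granite"; anything else falls through to maira-lite):

import requests

BASE_URL = "http://localhost:7860"  # placeholder; substitute your Space's URL

# GET / now serves templates/index.html, or the JSON fallback if it is missing
print(requests.get(f"{BASE_URL}/").status_code)

# POST /chat routes on "model_type"; unknown or missing values use maira-lite
payload = {
    "user_id": "boss",          # optional; the server now defaults to "CyberCoder225"
    "message": "Hello, Maira!",
    "model_type": "granite",    # routes to maira-art per the diff above
}
resp = requests.post(f"{BASE_URL}/chat", json=payload)
print(resp.json())  # {"maira": "...", "metadata": {"model": "maira-art", ...}}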
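brain.py is not part of this commit, so the MairaBrain interface is only visible through its call sites: one instance per core, each exposing get_response(user_id, user_input). Below is a minimal sketch of a compatible class, assuming the GGUF weights are fetched from the Hub with huggingface_hub and run with llama-cpp-python; this is an assumed stand-in, not the Space's actual brain.py:

# Assumed sketch of brain.py -- NOT the real implementation from this Space.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

class MairaBrain:
    def __init__(self, repo_id: str, filename: str):
        # Download the GGUF weights once, then load them with llama.cpp
        model_path = hf_hub_download(repo_id=repo_id, filename=filename)
        self.llm = Llama(model_path=model_path, n_ctx=2048)

    def get_response(self, user_id: str, user_input: str) -> str:
        # user_id could key per-user chat history; kept stateless in this sketch
        result = self.llm.create_chat_completion(
            messages=[{"role": "user", "content": user_input}]
        )
        return result["choices"][0]["message"]["content"]

Under that assumption, the unchanged lines around old line 30 would instantiate the five cores from the REPO_*/FILE_* pairs, e.g. maira_lite = MairaBrain(REPO_SMALL, FILE_SMALL).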