Update app.py
app.py CHANGED

@@ -1,4 +1,3 @@
-
 import os
 from flask import Flask, request, jsonify
 from llama_cpp import Llama
@@ -6,24 +5,42 @@ from huggingface_hub import hf_hub_download
 
 app = Flask(__name__)
 
-#
+# Model details
 REPO_ID = "CyberCoder225/maira-model"
 FILENAME = "SmolLM2-360M-Instruct.Q4_K_M.gguf"
 
-#
+# 1. Downloads the model to the Space's storage
 print("Fetching Maira's brain from Hugging Face...")
 model_path = hf_hub_download(repo_id=REPO_ID, filename=FILENAME)
 
+# 2. Loads the model into RAM
 llm = Llama(model_path=model_path, n_ctx=2048)
 
+# --- ADDED THIS TO FIX THE 404 ---
+@app.route('/', methods=['GET'])
+def home():
+    return jsonify({
+        "status": "online",
+        "message": "Maira API is running! Send POST requests to /chat",
+        "model": FILENAME
+    })
+
 @app.route('/chat', methods=['POST'])
 def chat():
-
-
-
-
-
-
+    try:
+        data = request.json
+        user_input = data.get("message", "")
+
+        # Simple prompt format for SmolLM
+        prompt = f"### User: {user_input}\n### Maira:"
+
+        output = llm(prompt, max_tokens=150, stop=["###", "</s>"], echo=False)
+        response = output["choices"][0]["text"].strip()
+
+        return jsonify({"maira": response})
+    except Exception as e:
+        return jsonify({"error": str(e)}), 500
 
 if __name__ == "__main__":
-
+    # --- FIXED PORT FOR HUGGING FACE ---
+    app.run(host="0.0.0.0", port=7860)
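The diff doesn't show the Space's dependency file, but the imports imply one. A guess at the matching requirements.txt, using the standard PyPI package names with versions left unpinned:

flask
huggingface_hub
llama-cpp-python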
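For reference, a minimal sketch of how to exercise the two routes once the Space is awake. This is not part of the commit: the base URL below is a placeholder (the actual Space hostname isn't shown in this diff), and it assumes the third-party requests package is installed.

import requests

# Placeholder: replace with the real Space URL.
BASE_URL = "https://your-space-name.hf.space"

# The new GET / route returns status JSON instead of a 404.
print(requests.get(f"{BASE_URL}/").json())

# POST /chat expects a JSON body with a "message" key.
resp = requests.post(f"{BASE_URL}/chat", json={"message": "Hello, Maira!"})
print(resp.json())  # {"maira": "..."} on success, {"error": "..."} with HTTP 500 on failure

The explicit port in app.run matches the port Hugging Face Spaces expects a web app to listen on by default (7860), which is why the commit pins it rather than using Flask's default of 5000.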