# HF Spaces Flask app (scraped page chrome — "Spaces: Runtime error", file-size
# header — removed; see the code below for the actual module).
from flask import Flask, request, jsonify
import requests
import os
import re

app = Flask(__name__)

# Hugging Face settings: the token is read from the environment (never
# hard-coded) and requests go through the HF router's OpenAI-compatible
# chat-completions endpoint.
HF_TOKEN = os.getenv("HF_TOKEN")
API_URL = "https://router.huggingface.co/v1/chat/completions"

# Curated list of 50 working models, used round-robin (one per request).
MODELLER = [
    "meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8", "deepcogito/cogito-671b-v2.1-FP8",
    "deepseek-ai/DeepSeek-V3.2-Exp", "Qwen/Qwen3.5-397B-A17B", "Qwen/Qwen3-Coder-480B-A35B-Instruct-FP8",
    "meta-llama/Llama-3.3-70B-Instruct", "deepseek-ai/DeepSeek-R1", "google/gemma-3-27b-it",
    "openai/gpt-oss-120b", "Qwen/QwQ-32B", "meta-llama/Llama-4-Scout-17B-16E-Instruct",
    "baidu/ERNIE-4.5-300B-A47B-Base-PT", "moonshotai/Kimi-K2.5", "deepseek-ai/DeepSeek-V3",
    "mistralai/Mistral-Small-2409", "Qwen/Qwen2.5-72B-Instruct", "meta-llama/Llama-3.1-70B-Instruct",
    "CohereLabs/c4ai-command-a-03-2025", "meta-llama/Llama-3.2-3B-Instruct", "google/gemma-3n-E4B-it",
    "Qwen/Qwen3-235B-A22B-Thinking-2507", "CohereLabs/aya-expanse-32b", "meta-llama/Meta-Llama-3-70B-Instruct",
    "allenai/Olmo-3.1-32B-Instruct", "Qwen/Qwen2.5-Coder-32B-Instruct", "mistralai/Mistral-7B-Instruct-v0.2",
    "meta-llama/Llama-3.1-8B-Instruct", "nvidia/NVIDIA-Nemotron-Nano-9B-v2", "Qwen/Qwen3-8B",
    "allenai/Olmo-3.1-32B-Think", "CohereLabs/c4ai-command-r-08-2024", "meta-llama/Llama-3.2-1B-Instruct",
    "Qwen/Qwen3-32B", "google/gemma-2-9b-it", "mistralai/Mistral-Nemo-Instruct-2407",
    "microsoft/Phi-3.5-mini-instruct", "meta-llama/Llama-Guard-3-8B", "Qwen/Qwen2.5-7B-Instruct",
    "allenai/Olmo-3-7B-Instruct", "CohereLabs/c4ai-command-r7b-12-2024", "Qwen/Qwen3-14B",
    "google/gemma-2-2b-it", "meta-llama/Llama-3-8B-Instruct", "Qwen/Qwen2.5-Coder-7B-Instruct",
    "mistralai/Pixtral-12B-2409", "Qwen/Qwen3-Next-80B-A3B-Instruct", "google/gemma-3-4b-it",
    "meta-llama/Llama-3.1-405B-Instruct-FP8", "deepseek-ai/DeepSeek-V2.5", "Qwen/Qwen2.5-1.5B-Instruct"
]

# Round-robin cursor over MODELLER; advanced by handle_request on every call.
model_sayaci = 0
@app.route("/", methods=["POST"])
def handle_request():
    """Score the posted stats via the next model in the round-robin rotation.

    Expects a JSON body with an "istatistikler" field, forwards it to the HF
    router chat-completions endpoint, and returns JSON:
      - on success: {"model", "puanlar", "sayac"}
      - on upstream failure: {"error": ...} with the upstream status code
      - on any other exception: {"error": ...} with status 500
    """
    global model_sayaci
    # Pick the next model, then advance the cursor. Advancing happens even if
    # the upstream call fails, so a broken model is skipped on the next request.
    secilen_model = MODELLER[model_sayaci % len(MODELLER)]
    model_sayaci += 1
    try:
        # silent=True: a missing/mis-typed body yields None instead of a 400
        # abort; fall back to an empty dict so .get() below is always safe.
        data = request.get_json(silent=True) or {}
        istatistikler = data.get("istatistikler", "")
        headers = {"Authorization": f"Bearer {HF_TOKEN}"}
        payload = {
            "model": secilen_model,
            "messages": [
                {"role": "system", "content": "SADECE 'İsim: Puan' formatında cevap ver. 4.0-10.0 arası."},
                {"role": "user", "content": istatistikler}
            ],
            "max_tokens": 80,      # answers are short "Name: Score" lines
            "temperature": 0.0     # deterministic scoring
        }
        response = requests.post(API_URL, headers=headers, json=payload, timeout=25)
        if response.status_code == 200:
            cevap = response.json()['choices'][0]['message']['content']
            # Reasoning models wrap deliberation in <think>...</think>; keep
            # only the text after the closing tag.
            if "</think>" in cevap:
                cevap = cevap.split("</think>")[-1]
            # Strip chatty filler ("Okay...", "Here is...") from the first
            # such word to the end of that line.
            cevap = re.sub(r'(?i)(okay|let|here|rating|score|analysis|sure|certainly).*', '', cevap).strip()
            return jsonify({
                "model": secilen_model,
                # Decimal comma for the Turkish-locale consumer.
                "puanlar": cevap.replace(".", ","),
                "sayac": model_sayaci
            })
        # Propagate the upstream error status to the caller.
        return jsonify({"error": f"Model Hatası: {response.status_code}"}), response.status_code
    except Exception as e:
        # Top-level boundary: report any failure as JSON rather than an HTML 500 page.
        return jsonify({"error": str(e)}), 500
if __name__ == "__main__":
    # HF Spaces requires the app to listen on port 7860, bound to all interfaces.
    app.run(host="0.0.0.0", port=7860)