zeroai87 committed on
Commit
48b261a
·
verified ·
1 Parent(s): faa09f7

Upload start.sh with huggingface_hub

Browse files
Files changed (1) hide show
  1. start.sh +143 -0
start.sh ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+
3
+ echo "🚀 Starting Jaksel AI on Hugging Face Spaces..."
4
+
5
+ # Imposta environment variables
6
+ export OLLAMA_HOST=0.0.0.0
7
+ export HF_HOME=/data
8
+ export TRANSFORMERS_CACHE=/data/transformers_cache
9
+
10
+ # Assicura che la directory esista
11
+ mkdir -p /root/.ollama
12
+
13
+ # Avvia Ollama in background
14
+ echo "📥 Starting Ollama server..."
15
+ ollama serve --host 0.0.0.0 --port 11434 &
16
+ OLLAMA_PID=$!
17
+
18
+ # Attendi che Ollama sia avviato
19
+ echo "⏳ Waiting for Ollama to start..."
20
+ sleep 15
21
+
22
+ # Testa se Ollama è attivo
23
+ echo "🔍 Testing Ollama connection..."
24
+ if curl -s http://127.0.0.1:11434/api/version > /dev/null; then
25
+ echo "✅ Ollama is running!"
26
+ else
27
+ echo "❌ Ollama failed to start"
28
+ exit 1
29
+ fi
30
+
31
+ # Pull Jaksel model ( se non già presente )
32
+ echo "🤖 Checking for Jaksel model..."
33
+ if ! ollama list | grep -q "zantara-jaksel"; then
34
+ echo "📥 Pulling Jaksel model (this may take a few minutes)..."
35
+ ollama pull zantara-jaksel:latest
36
+ if [ $? -eq 0 ]; then
37
+ echo "✅ Jaksel model loaded successfully!"
38
+ else
39
+ echo "⚠️ Model download failed, but continuing..."
40
+ fi
41
+ else
42
+ echo "✅ Jaksel model already exists!"
43
+ fi
44
+
45
+ # Script Python per il proxy server
46
+ python3 <<PYTHON
47
+ import subprocess
48
+ import time
49
+ import requests
50
+ from fastapi import FastAPI, Request, HTTPException
51
+ import uvicorn
52
+ import json
53
+ import logging
54
+
55
+ # Setup logging
56
+ logging.basicConfig(level=logging.INFO)
57
+ logger = logging.getLogger(__name__)
58
+
59
+ app = FastAPI(title="Jaksel AI Proxy", version="1.0.0")
60
+
61
+ @app.get("/")
62
+ async def root():
63
+ return {"message": "Jaksel AI is running!", "status": "healthy"}
64
+
65
+ @app.get("/health")
66
+ async def health():
67
+ try:
68
+ response = requests.get("http://127.0.0.1:11434/api/tags", timeout=5)
69
+ if response.status_code == 200:
70
+ models = response.json().get("models", [])
71
+ jaksel_found = any("zantara-jaksel" in m.get("name", "") for m in models)
72
+ return {
73
+ "status": "healthy",
74
+ "ollama": "connected",
75
+ "jaksel_loaded": jaksel_found,
76
+ "models": [m.get("name") for m in models]
77
+ }
78
+ else:
79
+ return {"status": "unhealthy", "ollama": "error"}
80
+ except Exception as e:
81
+ return {"status": "unhealthy", "error": str(e)}
82
+
83
+ @app.post("/api/generate")
84
+ async def proxy_generate(request: Request):
85
+ """Proxy per richieste generate a Ollama"""
86
+ try:
87
+ body = await request.json()
88
+
89
+ # Log richiesta per debug
90
+ logger.info(f"Received /api/generate request: model={body.get('model', 'unknown')}")
91
+
92
+ ollama_url = "http://127.0.0.1:11434/api/generate"
93
+
94
+ response = requests.post(
95
+ ollama_url,
96
+ json=body,
97
+ timeout=120
98
+ )
99
+
100
+ return response.json()
101
+
102
+ except Exception as e:
103
+ logger.error(f"Error in /api/generate: {str(e)}")
104
+ return {
105
+ "error": f"Proxy error: {str(e)}",
106
+ "response": "Maaf, Jaksel lagi nggak bisa merespon. Coba lagi ya!"
107
+ }
108
+
109
+ @app.post("/api/chat")
110
+ async def proxy_chat(request: Request):
111
+ """Proxy per richieste chat a Ollama"""
112
+ try:
113
+ body = await request.json()
114
+
115
+ logger.info(f"Received /api/chat request: model={body.get('model', 'unknown')}")
116
+
117
+ ollama_url = "http://127.0.0.1:11434/api/chat"
118
+
119
+ response = requests.post(
120
+ ollama_url,
121
+ json=body,
122
+ timeout=120,
123
+ stream=body.get("stream", False)
124
+ )
125
+
126
+ if body.get("stream", False):
127
+ return response.raw
128
+ else:
129
+ return response.json()
130
+
131
+ except Exception as e:
132
+ logger.error(f"Error in /api/chat: {str(e)}")
133
+ return {
134
+ "error": f"Proxy error: {str(e)}",
135
+ "message": {"content": "Maaf, Jaksel lagi nggak bisa merespon. Coba lagi ya!"}
136
+ }
137
+
138
+ logger.info("🌐 Starting proxy server on port 7860...")
139
+ uvicorn.run(app, host="0.0.0.0", port=7860)
140
+ PYTHON
141
+
142
+ # Keep the script running
143
+ wait $OLLAMA_PID