Update app.py
app.py CHANGED
@@ -2,6 +2,10 @@ from flask import Flask, request, jsonify
 import os
 import logging
 
+# Set cache environment variables
+os.environ['HF_HOME'] = '/.cache/huggingface'
+os.environ['TRANSFORMERS_CACHE'] = '/.cache/huggingface/transformers'
+
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
 
@@ -14,24 +18,36 @@ def load_model():
         logger.info("Loading YOUR fine-tuned model...")
         from transformers import pipeline
 
-        #
+        # Try to use your model with cache settings
         model = pipeline(
             "text-generation",
             model="kacperbb/phi-3.5-hf-finetuned",
-            trust_remote_code=True
+            trust_remote_code=True,
+            cache_dir="/.cache/huggingface"
         )
         logger.info("✅ YOUR fine-tuned model loaded successfully!")
         return True
     except Exception as e:
         logger.error(f"❌ Error loading your model: {e}")
-        logger.info("
+        logger.info("Trying with base model...")
         try:
-            model = pipeline(
+            model = pipeline(
+                "text-generation",
+                model="microsoft/Phi-3.5-mini-instruct",
+                trust_remote_code=True,
+                cache_dir="/.cache/huggingface"
+            )
             logger.info("✅ Base model loaded as fallback")
             return True
         except Exception as e2:
             logger.error(f"❌ Fallback failed: {e2}")
-
+            # Last resort - use a smaller model
+            try:
+                model = pipeline("text-generation", model="microsoft/DialoGPT-medium")
+                logger.info("✅ Smaller fallback model loaded")
+                return True
+            except:
+                return False
 
 @app.route('/generate', methods=['POST'])
 def generate_text():
@@ -66,7 +82,7 @@ def health():
 @app.route('/', methods=['GET'])
 def home():
     return jsonify({
-        "message": "
+        "message": "Phi 3.5 Fine-tuned API is running!",
         "model": "kacperbb/phi-3.5-hf-finetuned",
         "endpoints": {
             "generate": "POST /generate",
@@ -75,7 +91,7 @@ def home():
     })
 
 if __name__ == '__main__':
-    logger.info("Starting
+    logger.info("Starting Phi 3.5 API...")
     load_model()
     port = int(os.environ.get('PORT', 7860))
     app.run(host='0.0.0.0', port=port, debug=False)
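
For reference, a minimal sketch of exercising the updated API once the Space is running. The base URL and the request field name ("prompt") are assumptions made for illustration; the body of generate_text() is outside the hunks shown above, so the actual payload schema may differ.

import requests

# Base URL is an assumption: locally the app listens on PORT (default 7860).
BASE_URL = "http://localhost:7860"

# Root endpoint from home(): returns the message, model name, and endpoint list.
print(requests.get(f"{BASE_URL}/").json())

# POST /generate — the JSON field name "prompt" is assumed, since the
# generate_text() handler is not part of this diff.
resp = requests.post(f"{BASE_URL}/generate", json={"prompt": "Hello!"})
print(resp.status_code, resp.json())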