File size: 1,535 Bytes
f0d9284
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
import os
from flask import Flask, request, jsonify
from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification
from flask_cors import CORS

app = Flask(__name__)
CORS(app)  # allow cross-origin requests so a browser frontend can call this API

# Model artifacts (config, weights, tokenizer files) are expected to sit in
# the current working directory alongside this script.
local_model_path = "."
print(f"Loading model from local path: {local_model_path}")

# Build the inference pipeline from the on-disk files. If anything fails we
# keep the process alive but leave `classifier` as None; the HTTP handler
# checks for that and reports a clean 500 instead of the server crashing.
try:
    tokenizer = AutoTokenizer.from_pretrained(local_model_path)
    model = AutoModelForSequenceClassification.from_pretrained(local_model_path)
    classifier = pipeline("text-classification", model=model, tokenizer=tokenizer)
except Exception as e:
    print(f"Error loading model: {e}")
    classifier = None
else:
    print("Model loaded successfully.")

@app.route('/analyze', methods=['POST'])
def analyze_sentiment():
    """Classify the sentiment of the 'text' field in a JSON POST body.

    Expects: {"text": "..."}.
    Returns: the pipeline's prediction (a list of label/score dicts) as JSON,
    or a JSON error object with a 400 (bad input) / 500 (model or inference
    failure) status code.
    """
    if not classifier:
        return jsonify({"error": "Model could not be loaded. Check server logs."}), 500

    # BUG FIX: request.json raises on a missing or non-JSON body, so Flask
    # would return its own generic error before our validation ever ran.
    # get_json(silent=True) yields None instead, letting the explicit
    # "No text provided" 400 below cover that case too.
    data = request.get_json(silent=True) or {}
    text = data.get('text', '')

    if not text:
        return jsonify({"error": "No text provided"}), 400

    try:
        result = classifier(text)
        return jsonify(result)
    except Exception as e:
        # Boundary handler: surface the failure to the client rather than
        # letting the exception propagate as an opaque 500.
        print(f"Error during analysis: {e}")
        return jsonify({"error": str(e)}), 500

if __name__ == '__main__':
    # Bind on all interfaces; honor a platform-provided PORT, defaulting to 8080.
    listen_port = int(os.environ.get("PORT", 8080))
    app.run(host='0.0.0.0', port=listen_port)