SreyaDvn commited on
Commit
1003b73
·
verified ·
1 Parent(s): 8b6ddff

Upload 6 files

Browse files
Files changed (6) hide show
  1. .gitignore +30 -0
  2. Dockerfile +18 -0
  3. Procfile +1 -0
  4. app.py +111 -0
  5. predictor.py +44 -0
  6. requirements.txt +0 -0
.gitignore ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Ignore Python cache and bytecode
# (*.py[cod] already matches *.pyc, *.pyo and *.pyd, so those patterns
# are not repeated separately)
__pycache__/
*.py[cod]
*.so

# Ignore virtual environment
venv/

# Ignore saved model files
saved_model/
*.safetensors
*.pt
*.bin
*.ckpt

# Ignore logs or runtime data
*.log
*.csv
*.tmp
*.DS_Store

# Ignore Jupyter Notebook checkpoints
.ipynb_checkpoints/

# Ignore environment files if sensitive
.env
Dockerfile ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Use a slim Python image
FROM python:3.10-slim

# Don't write .pyc files, and keep stdout/stderr unbuffered so container
# logs show up immediately instead of being held in a buffer.
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1

# Set working directory inside the container
WORKDIR /app

# Copy the requirements file first so the dependency layer is cached
# across rebuilds that only touch application code.
COPY requirements.txt .
# --no-cache-dir: pip's wheel cache is dead weight inside an image layer.
RUN pip install --no-cache-dir --upgrade pip && \
    pip install --no-cache-dir -r requirements.txt

# Copy all app files to the container
COPY . .

# Expose the port the app will run on (Hugging Face requires 7860)
EXPOSE 7860

# Run the Flask app
CMD ["python", "app.py"]
Procfile ADDED
@@ -0,0 +1 @@
 
 
web: gunicorn app:app --bind 0.0.0.0:${PORT:-7860}
app.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from flask import Flask, render_template, request, redirect
import pandas as pd

from predictor import predict_sentiment

app = Flask(__name__)

# Map the classifier's raw output labels to human-readable sentiment names.
LABEL_MAP = {
    "LABEL_0": "Negative",
    "LABEL_1": "Positive",
}
# Landing page: forward visitors straight to the single-review form.
@app.route("/")
def root():
    # The root URL has no content of its own; the single-review page is
    # the real entry point of the app.
    target = "/sentiment-review/single"
    return redirect(target)
# Single review input route
@app.route("/sentiment-review/single", methods=["GET", "POST"])
def single_review():
    """Render the single-review form; on POST, classify the submitted text.

    Template context: prediction (mapped label or "Error"), confidence
    (percentage, 2 decimals), the echoed review text, and chosen_model.
    """
    prediction = None
    confidence = None
    chosen_model = None
    review = ""

    if request.method == "POST":
        review = request.form.get("review", "").strip()
        if review:
            try:
                outcome = predict_sentiment(review)

                raw_label = outcome["prediction"].get("label")
                raw_score = outcome["prediction"].get("score", 0.0)
                chosen_model = outcome.get("chosen_model", "N/A")

                # Translate the raw model label and express the score as a %.
                prediction = LABEL_MAP.get(raw_label, raw_label)
                confidence = round(float(raw_score) * 100, 2)
            except Exception as e:
                # Surface the failure in the UI instead of a 500 page.
                print("❌ Single Review Processing Error:", e)
                prediction, confidence, chosen_model = "Error", 0.0, "N/A"

    return render_template(
        "index.html",
        prediction=prediction,
        confidence=confidence,
        review=review,
        chosen_model=chosen_model,
    )
# πŸ“ Batch upload route
@app.route("/sentiment-review/batch", methods=["GET", "POST"])
def batch_review():
    """On POST, read an uploaded CSV with a 'review' column and classify
    every row; on GET just render the upload form.

    Renders batch.html with either `results` (list of per-row dicts) or
    `error` (a human-readable message).
    """
    if request.method == "POST":
        if 'csvfile' not in request.files:
            return render_template("batch.html", error="No file part found.")

        file = request.files['csvfile']
        if not file.filename:
            return render_template("batch.html", error="No selected file.")

        # Case-insensitive extension check so uploads like "DATA.CSV" are
        # accepted too (the previous check rejected upper-case extensions).
        if file.filename.lower().endswith(".csv"):
            try:
                df = pd.read_csv(file, encoding="utf-8")
                if "review" not in df.columns:
                    return render_template("batch.html", error="CSV must have a 'review' column.")

                results = []
                # fillna("") so empty cells classify as empty strings rather
                # than crashing on NaN.
                for i, text in enumerate(df["review"].fillna("").tolist()):
                    try:
                        result = predict_sentiment(text)

                        raw_label = result["prediction"].get("label")
                        score = result["prediction"].get("score", 0.0)
                        chosen_model = result.get("chosen_model", "N/A")

                        sentiment = LABEL_MAP.get(raw_label, raw_label)
                        confidence = round(float(score) * 100, 2)

                        print(f"🧠 Review {i+1}: {text[:40]}... β†’ {sentiment} ({confidence}%) [Model: {chosen_model}]")

                        results.append({
                            "text": text,
                            "sentiment": sentiment,
                            "confidence": confidence,
                            "chosen_model": chosen_model
                        })
                    except Exception as inner_e:
                        # One bad row must not abort the whole batch; record
                        # it as an error row and keep going.
                        print(f"⚠️ Error processing review {i+1}: {inner_e}")
                        results.append({
                            "text": text,
                            "sentiment": "Error",
                            "confidence": 0.0,
                            "chosen_model": "N/A"
                        })

                return render_template("batch.html", results=results)

            except Exception as e:
                # CSV parse failures, encoding problems, etc.
                print("❌ CSV Processing error:", e)
                return render_template("batch.html", error=f"Processing error: {str(e)}")

        return render_template("batch.html", error="Invalid file format. Upload .csv only.")

    return render_template("batch.html")
109
+
if __name__ == "__main__":
    import os

    # Never enable the Werkzeug debugger unconditionally: debug=True on a
    # publicly reachable container exposes an interactive console (remote
    # code execution). Opt in locally with FLASK_DEBUG=1 instead.
    debug = os.environ.get("FLASK_DEBUG") == "1"
    app.run(host="0.0.0.0", port=7860, debug=debug)
predictor.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
import torch

# Hub repositories for the three candidate classifiers.
model_names = {
    "label0": "SreyaDvn/savedModelLebel0",
    "label1": "SreyaDvn/savedModelLebel1",
    "balanced": "SreyaDvn/sentiment-model"
}

# Build one text-classification pipeline per repository, running on the
# first GPU when available, otherwise on CPU (-1).
_device = 0 if torch.cuda.is_available() else -1
pipelines = {}
for name, path in model_names.items():
    pipelines[name] = pipeline(
        "text-classification",
        model=AutoModelForSequenceClassification.from_pretrained(path),
        tokenizer=AutoTokenizer.from_pretrained(path),
        device=_device,
    )

print("βœ… All models loaded successfully!")
def predict_sentiment(text: str):
    """Score *text* with every loaded pipeline and return the most confident.

    Returns a dict with the winning model's key ("chosen_model"), its raw
    prediction dict ("prediction", e.g. {'label': 'LABEL_1', 'score': 0.92}),
    and every model's output ("all_results").
    """
    # Run each model once; a pipeline returns a one-element list per input.
    results = {
        name: pipe(text, truncation=True)[0]
        for name, pipe in pipelines.items()
    }

    # ---- IF-ELSE LOGIC ----
    # Selection rule: keep whichever model reported the highest confidence.
    best_model = max(results, key=lambda k: results[k]["score"])

    return {
        "chosen_model": best_model,
        "prediction": results[best_model],
        "all_results": results,
    }
requirements.txt ADDED
Binary file (1.24 kB). View file