Shivenduu committed on
Commit
b20cbbf
·
1 Parent(s): 7415fbc

Initial commit for Flask Docker Space

Browse files
Dockerfile ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Use a base image with Python and CUDA if you use torch GPU (or just python:3.10)
FROM python:3.10-slim

# Set work directory
WORKDIR /app

# Copy only the requirements first so the (slow) dependency-install layer
# is cached and not rebuilt on every source-code change.
COPY requirements.txt /app/

# Install dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy your code
COPY . /app

# Expose the Flask port
EXPOSE 7860

# Start the Flask app
CMD ["python", "app.py"]
app.py ADDED
@@ -0,0 +1,159 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from flask import Flask, request, jsonify
2
+ from flask_cors import CORS
3
+ import os
4
+ import pandas as pd
5
+ # from utils.analytics import get_summary_stats
6
+ from model.mood_predict import mood_predict
7
+ from utils.mapping import map_to_mood
8
+ import datetime
9
+
10
+ app = Flask(__name__)
11
+
12
+ CORS(app)
13
+
# --- Data File ---
# All logs live in one CSV under data/; both the /log writer and the
# /get_all_logs reader go through these constants.
DATA_DIR = 'data'
LOGS_CSV = os.path.join(DATA_DIR, 'logs.csv')
# Column order for every row in logs.csv — keep in sync with the frontend form.
LOG_COLUMNS = ['date', 'exercise', 'water', 'reading', 'meditation', 'mood', 'journal_text']
18
+
# --- Helper Function ---
def init_log_file():
    """Create the data directory and the log CSV (with headers) if missing.

    Uses the module-level DATA_DIR / LOGS_CSV / LOG_COLUMNS constants so the
    path and schema stay consistent with the /log and /get_all_logs endpoints
    (the original hard-coded both, risking drift).
    """
    os.makedirs(DATA_DIR, exist_ok=True)
    if not os.path.exists(LOGS_CSV):
        df = pd.DataFrame(columns=LOG_COLUMNS)
        df.to_csv(LOGS_CSV, index=False)
27
+
28
+
# --- API Endpoints ---

@app.route("/")
def home():
    """Health-check endpoint confirming the backend is up."""
    # 200 OK: nothing is created here, so the previous 201 (Created) was the
    # wrong status code for a plain GET status message.
    return jsonify({'message': "MindTrack Backend is running!"}), 200
34
@app.route("/log", methods=["POST"])
def log_habit():
    """
    Save a new log entry (read-modify-write on logs.csv).

    Expects a JSON body containing at least a 'date' key. An existing entry
    with the same date is overwritten; rows are kept sorted by date.

    Returns 201 on success, 400 on a missing/invalid payload, 500 on I/O errors.
    """
    # silent=True yields None (instead of raising) for a missing or non-JSON
    # body, so the 400 branch below covers both cases rather than a 500.
    new_log_data = request.get_json(silent=True)

    if not new_log_data or 'date' not in new_log_data:
        return jsonify({"error": "No data or date provided"}), 400

    try:
        # 1. Ensure the data directory exists
        os.makedirs(DATA_DIR, exist_ok=True)

        # 2. Load existing data if file exists
        if os.path.exists(LOGS_CSV):
            df = pd.read_csv(LOGS_CSV)
        else:
            # Create an empty DataFrame WITH the correct columns
            df = pd.DataFrame(columns=LOG_COLUMNS)

        # 3. Check for and remove duplicate date (for overwrite)
        new_date = new_log_data['date']
        if not df.empty and new_date in df['date'].values:
            app.logger.info("Duplicate date found: %s. Overwriting old entry.", new_date)
            df = df[df['date'] != new_date]  # Keep all rows *except* the duplicate date

        # 4. Create a DataFrame for the new entry, ensuring it also has all columns
        df_entry = pd.DataFrame([new_log_data], columns=LOG_COLUMNS)

        # 5. Append new entry to the (potentially filtered) DataFrame
        df_updated = pd.concat([df, df_entry], ignore_index=True)

        # 6. Sort by date for consistency (ISO date strings sort correctly)
        df_updated = df_updated.sort_values(by='date')

        # 7. Save the entire updated DataFrame back to the CSV
        #    header=True is the default and correct
        df_updated.to_csv(LOGS_CSV, index=False)

        return jsonify({"message": "Log saved successfully"}), 201

    except Exception as e:
        # Log with traceback via the app logger (the original print went to
        # stdout only and recorded no traceback).
        app.logger.exception("Error saving log: %s", e)
        return jsonify({"error": "Failed to save log"}), 500
82
@app.route('/get_all_logs', methods=['GET'])
def get_all_logs():
    """
    Read all log entries from the CSV and return them as a JSON list.

    This is the "Single Source of Truth" endpoint for the dashboard.
    Returns an empty list when no log file exists yet.
    """
    if not os.path.exists(LOGS_CSV):
        # If the file doesn't exist yet, just return an empty list
        return jsonify([])

    try:
        df = pd.read_csv(LOGS_CSV)

        # pandas loads missing cells (e.g. an empty journal_text) as NaN,
        # which jsonify would emit as the invalid JSON token `NaN`.
        # Convert them to None so the output contains proper `null`s.
        df = df.astype(object).where(pd.notnull(df), None)

        # Convert DataFrame to JSON (orient='records' gives a list of dicts)
        logs_json = df.to_dict(orient='records')

        return jsonify(logs_json)

    except Exception as e:
        app.logger.exception("Error reading logs: %s", e)
        return jsonify({"error": "Failed to retrieve logs"}), 500
105
@app.route("/predict_mood", methods=["POST"])
def predict_mood():
    """
    Predict the mood of a given journal text.

    Expects JSON: {"text": "..."}. Returns {"mood": <mapped label>, "score": <float>}.
    Returns 400 for a missing/empty text, 500 on model errors.
    """
    try:
        # silent=True returns None for a missing or non-JSON body instead of
        # raising, and `or {}` lets the 400 branch below answer cleanly
        # (previously data could be None -> AttributeError -> 500).
        data = request.get_json(silent=True) or {}
        text = data.get("text")

        if not text or text.strip() == "":
            return jsonify({"error": "No text provided"}), 400

        pred_moods = mood_predict(text)
        mood = pred_moods.get('label', 'Neutral')
        score = pred_moods.get('score', 0.0)

        # Collapse the model's fine-grained emotion label to an app-level mood.
        mapped_mood = map_to_mood(mood)

        return jsonify({
            "mood": mapped_mood,
            "score": score
        }), 200

    except Exception as e:
        app.logger.error(f"Error in /predict_mood: {e}")
        return jsonify({"error": str(e)}), 500
133
@app.route('/reset_logs', methods=['POST'])
def reset_logs():
    """
    Delete the logs.csv file to reset the dashboard to sample data.

    This is for demo purposes. Returns 200 whether or not a file existed.
    """
    try:
        if os.path.exists(LOGS_CSV):
            os.remove(LOGS_CSV)
            # app.logger for consistency with the other endpoints' logging.
            app.logger.info("logs.csv has been deleted.")
            return jsonify({"message": "Log file deleted successfully. Dashboard will reset to sample data."}), 200
        else:
            app.logger.info("logs.csv not found, no action needed.")
            return jsonify({"message": "No log file to delete."}), 200
    except Exception as e:
        app.logger.exception("Error deleting log file: %s", e)
        return jsonify({"error": f"Failed to delete log file: {e}"}), 500
152
# --- Main execution ---
if __name__ == "__main__":
    init_log_file()  # Ensure log file exists on startup
    # Hosting platforms (Render, HF Spaces) inject PORT; the default matches
    # the Dockerfile's EXPOSE 7860, so behavior is unchanged when unset.
    port = int(os.environ.get("PORT", 7860))
    # NOTE(review): debug=True on 0.0.0.0 exposes the Werkzeug debugger
    # (arbitrary code execution) to anyone who can reach the container --
    # keep it opt-in via the FLASK_DEBUG env var instead of hard-coded.
    debug = os.environ.get("FLASK_DEBUG", "0") == "1"
    app.run(host="0.0.0.0", port=port, debug=debug)
model/__pycache__/mood_predict.cpython-312.pyc ADDED
Binary file (661 Bytes). View file
 
model/mood_predict.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from transformers import pipeline

# Load the emotion classifier once at import time so the model weights are
# downloaded/initialised a single time per process.
# top_k=1 makes the pipeline return only the highest-scoring label per input.
# (The deprecated return_all_scores flag conflicts with top_k and has been
# dropped; the output shape is unchanged.)
emotion_classifier = pipeline(
    "text-classification",
    model="j-hartmann/emotion-english-distilroberta-base",
    top_k=1,
)


def mood_predict(text: str) -> dict:
    """Classify *text* and return the top emotion prediction.

    Returns a dict like {'label': 'joy', 'score': 0.98}.
    """
    # pipeline(text) -> list per input, each a list of top_k predictions;
    # [0][0] selects the single best prediction for this one input.
    preds = emotion_classifier(text)[0]
    return preds[0]
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ flask
2
+ flask-cors
3
+ transformers
4
+ torch
5
+ pandas
6
+ huggingface_hub[hf_xet]
7
+ gunicorn
utils/__pycache__/analytics.cpython-312.pyc ADDED
Binary file (190 Bytes). View file
 
utils/__pycache__/mapping.cpython-312.pyc ADDED
Binary file (491 Bytes). View file
 
utils/analytics.py ADDED
File without changes
utils/mapping.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
def map_to_mood(label):
    """Collapse a fine-grained emotion label into one of the app's moods.

    Any label not in the table (including unknown ones) maps to 'Neutral'.
    """
    emotion_to_mood = {
        'joy': 'Happy',
        'neutral': 'Neutral',
        'surprise': 'Neutral',
        'sadness': 'Sad',
        'fear': 'Sad',
        'anger': 'Angry',
        'disgust': 'Angry',
    }
    return emotion_to_mood.get(label, 'Neutral')