RathodHarish committed on
Commit
3be9769
·
verified ·
1 Parent(s): 84415a8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -144
app.py CHANGED
@@ -1,72 +1,48 @@
1
  from flask import Flask, request, jsonify
2
- from simple_salesforce import Salesforce
3
  import pandas as pd
4
- from datetime import datetime, timedelta
5
  import logging
6
  from sklearn.ensemble import IsolationForest
7
  from transformers import pipeline
8
  import torch
9
  import os
10
  import time
11
- import requests
12
- from requests.exceptions import Timeout
13
 
14
- # Configure logging to a file and console for better debugging
15
  logging.basicConfig(
16
  level=logging.INFO,
17
  format='%(asctime)s - %(levelname)s - %(message)s',
18
  handlers=[
19
- logging.FileHandler('app.log'), # Log to a file for persistence
20
- logging.StreamHandler() # Log to console for real-time visibility
21
  ]
22
  )
23
 
 
 
 
 
 
 
 
 
 
24
  # Initialize Flask app
25
  app = Flask(__name__)
26
-
27
- # Salesforce credentials (use environment variables for security)
28
- SF_USERNAME = os.getenv('SF_USERNAME', 'your_salesforce_username')
29
- SF_PASSWORD = os.getenv('SF_PASSWORD', 'your_salesforce_password')
30
- SF_SECURITY_TOKEN = os.getenv('SF_SECURITY_TOKEN', 'your_security_token')
31
- SF_INSTANCE_URL = os.getenv('SF_INSTANCE_URL', 'https://login.salesforce.com')
32
-
33
- # Global variable for Salesforce connection
34
- sf = None
35
 
36
  # Global variable for Hugging Face model (lazy initialization)
37
  summarizer = None
 
38
 
39
  # Health check endpoint to confirm the app is running
40
  @app.route('/health', methods=['GET'])
41
  def health_check():
42
  return jsonify({"status": "App is running"}), 200
43
 
44
- # Connect to Salesforce with a timeout
45
- def connect_to_salesforce():
46
- global sf
47
- logging.info("Attempting to connect to Salesforce...")
48
- start_time = time.time()
49
- try:
50
- # Use a timeout to prevent hanging
51
- session = requests.Session()
52
- adapter = requests.adapters.HTTPAdapter(max_retries=3)
53
- session.mount('https://', adapter)
54
- session.request('GET', SF_INSTANCE_URL, timeout=10) # Test connectivity
55
-
56
- sf = Salesforce(
57
- username=SF_USERNAME,
58
- password=SF_PASSWORD,
59
- security_token=SF_SECURITY_TOKEN,
60
- instance_url=SF_INSTANCE_URL,
61
- session=session
62
- )
63
- logging.info(f"Connected to Salesforce successfully in {time.time() - start_time:.2f} seconds")
64
- except Timeout:
65
- logging.error("Salesforce connection timed out after 10 seconds")
66
- sf = None
67
- except Exception as e:
68
- logging.error(f"Failed to connect to Salesforce: {str(e)}")
69
- sf = None
70
 
71
  # Lazy load the Hugging Face model
72
  def load_huggingface_model():
@@ -82,57 +58,26 @@ def load_huggingface_model():
82
  logging.error(f"Failed to load Hugging Face model: {str(e)}")
83
  summarizer = None
84
 
85
- # Fetch SmartLog records from Salesforce
86
  def fetch_smartlog_records(lab_site, start_date, end_date, equipment_type):
87
- if sf is None:
88
- raise Exception("Salesforce connection not established")
89
- try:
90
- logging.info("Fetching SmartLog records from Salesforce...")
91
- query = "SELECT Device_Id__c, Log_Type__c, Status__c, Timestamp__c, Usage_Hours__c, Downtime__c, AMC_Date__c FROM SmartLog__c WHERE "
92
- conditions = []
93
- params = {}
94
- if lab_site:
95
- conditions.append("Lab_Site__c = :lab_site")
96
- params['lab_site'] = lab_site
97
- if start_date:
98
- conditions.append("Timestamp__c >= :start_date")
99
- params['start_date'] = start_date
100
- if end_date:
101
- conditions.append("Timestamp__c <= :end_date")
102
- params['end_date'] = end_date
103
- if equipment_type:
104
- conditions.append("Log_Type__c = :equipment_type")
105
- params['equipment_type'] = equipment_type
106
-
107
- if not conditions:
108
- query = query.replace(" WHERE ", "")
109
- else:
110
- query += " AND ".join(conditions)
111
-
112
- # Execute SOQL query
113
- result = sf.query_all(query, **params)
114
- records = result['records']
115
-
116
- # Convert records to a DataFrame
117
- data = []
118
- for record in records:
119
- data.append({
120
- 'device_id': record['Device_Id__c'],
121
- 'log_type': record['Log_Type__c'],
122
- 'status': record['Status__c'],
123
- 'timestamp': record['Timestamp__c'],
124
- 'usage_hours': record['Usage_Hours__c'],
125
- 'downtime': record['Downtime__c'],
126
- 'amc_date': record['AMC_Date__c']
127
- })
128
- df = pd.DataFrame(data)
129
- df['timestamp'] = pd.to_datetime(df['timestamp'], errors='coerce')
130
- df['amc_date'] = pd.to_datetime(df['amc_date'], errors='coerce')
131
- logging.info(f"Fetched {len(df)} SmartLog records")
132
- return df
133
- except Exception as e:
134
- logging.error(f"Failed to fetch SmartLog records: {str(e)}")
135
- raise e
136
 
137
  # Format summary prompt and generate report
138
  def summarize_logs(df):
@@ -234,9 +179,6 @@ def create_usage_chart_data(df):
234
  @app.route('/process_logs', methods=['POST'])
235
  def process_logs():
236
  try:
237
- if sf is None:
238
- return jsonify({"error": "Salesforce connection not established. Check server logs for details."}), 500
239
-
240
  data = request.get_json()
241
  lab_site = data.get('lab_site')
242
  start_date = data.get('start_date')
@@ -244,10 +186,10 @@ def process_logs():
244
  equipment_type = data.get('equipment_type')
245
  amc_threshold = data.get('amc_threshold', 30)
246
 
247
- # Fetch SmartLog records from Salesforce
248
  df = fetch_smartlog_records(lab_site, start_date, end_date, equipment_type)
249
  if df.empty:
250
- return jsonify({"error": "No data available in SmartLog__c."}), 400
251
 
252
  # Step 1: Summary Report
253
  summary = summarize_logs(df)
@@ -255,50 +197,4 @@ def process_logs():
255
  # Step 2: Log Preview (First 5 Rows)
256
  preview_lines = []
257
  for idx, row in df.head().iterrows():
258
- preview_lines.append({
259
- "row": idx + 1,
260
- "device_id": row['device_id'],
261
- "timestamp": row['timestamp'].isoformat() if pd.notnull(row['timestamp']) else None,
262
- "usage_hours": float(row['usage_hours']) if pd.notnull(row['usage_hours']) else 0,
263
- "downtime": float(row['downtime']) if pd.notnull(row['downtime']) else 0,
264
- "amc_date": row['amc_date'].isoformat() if pd.notnull(row['amc_date']) else None
265
- })
266
-
267
- # Step 3: Usage Chart (Textual Data)
268
- chart_data = create_usage_chart_data(df)
269
-
270
- # Step 4: Anomaly Detection
271
- anomaly_lines, anomaly_error = detect_anomalies(df)
272
- if anomaly_error:
273
- anomaly_lines = [{"error": anomaly_error}]
274
-
275
- # Step 5: AMC Reminders
276
- reminder_lines, reminder_error = check_amc_reminders(df, datetime.now())
277
- if reminder_error:
278
- reminder_lines = [{"error": reminder_error}]
279
-
280
- # Step 6: Dashboard Insights
281
- insights = generate_dashboard_insights(df)
282
-
283
- # Prepare the response
284
- response = {
285
- "summary": summary,
286
- "log_preview": preview_lines,
287
- "usage_chart": chart_data,
288
- "anomalies": anomaly_lines,
289
- "amc_reminders": reminder_lines,
290
- "insights": insights
291
- }
292
-
293
- return jsonify(response), 200
294
-
295
- except Exception as e:
296
- logging.error(f"Failed to process logs: {str(e)}")
297
- return jsonify({"error": str(e)}), 500
298
-
299
- if __name__ == "__main__":
300
- logging.info("Starting Flask application...")
301
- start_time = time.time()
302
- connect_to_salesforce() # Attempt to connect to Salesforce at startup
303
- logging.info(f"Flask application startup completed in {time.time() - start_time:.2f} seconds")
304
- app.run(host="0.0.0.0", port=5000, debug=True)
 
1
  from flask import Flask, request, jsonify
 
2
  import pandas as pd
3
+ from datetime import datetime
4
  import logging
5
  from sklearn.ensemble import IsolationForest
6
  from transformers import pipeline
7
  import torch
8
  import os
9
  import time
10
+ import sys
 
11
 
12
+ # Configure logging to console first (force output even if file logging fails)
13
  logging.basicConfig(
14
  level=logging.INFO,
15
  format='%(asctime)s - %(levelname)s - %(message)s',
16
  handlers=[
17
+ logging.StreamHandler(sys.stdout) # Force console output
 
18
  ]
19
  )
20
 
21
+ # Add file handler for logging (if possible)
22
+ try:
23
+ file_handler = logging.FileHandler('app.log')
24
+ file_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
25
+ logging.getLogger().addHandler(file_handler)
26
+ logging.info("File logging enabled successfully")
27
+ except Exception as e:
28
+ logging.warning(f"Failed to enable file logging: {str(e)}. Continuing with console logging only.")
29
+
30
  # Initialize Flask app
31
  app = Flask(__name__)
32
+ logging.info("Flask app initialized")
 
 
 
 
 
 
 
 
33
 
34
  # Global variable for Hugging Face model (lazy initialization)
35
  summarizer = None
36
+ logging.info("Hugging Face model set to lazy initialization")
37
 
38
# Lightweight liveness probe: confirms the Flask process is serving requests.
@app.route('/health', methods=['GET'])
def health_check():
    """Return a 200 JSON response indicating the service is up."""
    payload = {"status": "App is running"}
    return jsonify(payload), 200
42
 
43
+ # Dummy Salesforce connection placeholder (disabled for now)
44
+ sf = None
45
+ logging.info("Salesforce connection disabled for troubleshooting")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
 
47
  # Lazy load the Hugging Face model
48
  def load_huggingface_model():
 
58
  logging.error(f"Failed to load Hugging Face model: {str(e)}")
59
  summarizer = None
60
 
61
# Stand-in for the real Salesforce fetch while the integration is disabled.
def fetch_smartlog_records(lab_site, start_date, end_date, equipment_type):
    """Return a fixed single-row DataFrame of SmartLog data for testing.

    The filter arguments (lab_site, start_date, end_date, equipment_type)
    are accepted for interface compatibility but ignored — Salesforce is
    currently disabled, so this always yields the same dummy record.
    """
    logging.info("Salesforce connection is disabled. Returning dummy data for testing.")
    dummy_record = {
        'device_id': 'D001',
        'log_type': 'SmartLog',
        'status': 'OK',
        'timestamp': '2025-05-14T10:15:00Z',
        'usage_hours': 5.0,
        'downtime': 0.0,
        'amc_date': '2025-06-15',
    }
    df = pd.DataFrame([dummy_record])
    # Coerce the string fields into real datetimes; unparseable values become NaT.
    for col in ('timestamp', 'amc_date'):
        df[col] = pd.to_datetime(df[col], errors='coerce')
    logging.info(f"Returning dummy DataFrame with {len(df)} records")
    return df
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81
 
82
  # Format summary prompt and generate report
83
  def summarize_logs(df):
 
179
  @app.route('/process_logs', methods=['POST'])
180
  def process_logs():
181
  try:
 
 
 
182
  data = request.get_json()
183
  lab_site = data.get('lab_site')
184
  start_date = data.get('start_date')
 
186
  equipment_type = data.get('equipment_type')
187
  amc_threshold = data.get('amc_threshold', 30)
188
 
189
+ # Fetch SmartLog records (using dummy data for now)
190
  df = fetch_smartlog_records(lab_site, start_date, end_date, equipment_type)
191
  if df.empty:
192
+ return jsonify({"error": "No data available."}), 400
193
 
194
  # Step 1: Summary Report
195
  summary = summarize_logs(df)
 
197
  # Step 2: Log Preview (First 5 Rows)
198
  preview_lines = []
199
  for idx, row in df.head().iterrows():
200
+ preview