# Rudraaaa76's picture
# Update app.py
# 0623b68 verified
# from flask import Flask, request, jsonify
# from flask_cors import CORS
# import numpy as np
# import pandas as pd
# import joblib
# import requests
# import os
# import tensorflow as tf # Required for TFLite interpreter
# # URLs for hosted files
# # tflite_model_url = "https://drive.google.com/uc?id=1j5JU2xD2iwi5STzjH2gKILAbekiKWBkp&export=download"
# # scaler_url = "https://drive.google.com/uc?id=1Qu2ogpNw8MqPbstNpcEbO0oRDQ56qX5X&export=download"
# # label_encoder_url = "https://drive.google.com/uc?id=1qYi5agK5vDKc-k6UcaRt7yL9AFJ3O_7-&export=download"
# # Local file paths
# tflite_model_path = "motion_classification_model.tflite"
# scaler_path = "scaler.pkl"
# label_encoder_path = "label_encoder.pkl"
# # Function to download files if they don't exist locally
# # def download_file(url, local_path):
# # if not os.path.exists(local_path):
# # print(f"Downloading {local_path} from {url}...")
# # response = requests.get(url)
# # with open(local_path, 'wb') as file:
# # file.write(response.content)
# # print(f"Downloaded {local_path}")
# # Download required files
# # download_file(tflite_model_url, tflite_model_path)
# # download_file(scaler_url, scaler_path)
# # download_file(label_encoder_url, label_encoder_path)
# # Load the scaler and label encoder
# scaler = joblib.load(scaler_path)
# label_encoder = joblib.load(label_encoder_path)
# # Initialize the TFLite interpreter
# interpreter = tf.lite.Interpreter(model_path=tflite_model_path)
# interpreter.allocate_tensors()
# # Get input and output tensor details
# input_details = interpreter.get_input_details()
# output_details = interpreter.get_output_details()
# # Feature Columns
# feature_columns = ['AccX', 'AccY', 'AccZ', 'GyroX', 'GyroY', 'GyroZ']
# sequence_length = 50
# # Initialize Flask App
# app = Flask(__name__)
# CORS(app)
# @app.route('/')
# def home():
# return "Welcome to driving behavior analysis"
# # Define a route for the prediction function
# @app.route('/predict', methods=['POST'])
# def predict_behavior():
# try:
# # Get the data from the request
# data = request.json
# # Convert the data to a DataFrame
# df = pd.DataFrame(data)
# # Validate required columns
# if not all(col in df.columns for col in feature_columns):
# return jsonify({'error': 'Missing columns'}), 400
# # Scale the data
# df[feature_columns] = scaler.transform(df[feature_columns])
# # Create sequences
# sequences = []
# if len(df) >= sequence_length:
# # If enough data for full sequences
# for i in range(len(df) - sequence_length + 1):
# seq = df.iloc[i:i+sequence_length][feature_columns].values
# sequences.append(seq)
# else:
# # Pad the data if it's smaller than the sequence length
# padded_data = np.pad(
# df[feature_columns].values,
# ((sequence_length - len(df), 0), (0, 0)), # Pad missing rows
# mode='constant',
# constant_values=0
# )
# sequences.append(padded_data)
# # Convert to NumPy array
# X_input = np.array(sequences, dtype=np.float32)
# # Make predictions using TFLite model
# predictions = []
# for seq in X_input:
# # Prepare the input tensor
# interpreter.set_tensor(input_details[0]['index'], [seq])
# interpreter.invoke()
# # Get the output tensor
# output_data = interpreter.get_tensor(output_details[0]['index'])
# predictions.append(output_data)
# # Get predicted classes
# predicted_classes = np.argmax(predictions, axis=2).flatten()
# # Convert integers to class labels
# class_labels = label_encoder.inverse_transform(predicted_classes)
# # Calculate class frequencies
# unique_classes, counts = np.unique(class_labels, return_counts=True)
# max_count = np.max(counts)
# most_frequent_classes = unique_classes[counts == max_count]
# # Select the first class in case of ties
# most_frequent_class = most_frequent_classes[0] # Select the first class alphabetically
# # Return the predicted class labels and the most frequent class
# return jsonify({
# "predicted_classes": list(class_labels), # Full list of predictions
# "most_frequent_class": most_frequent_class
# })
# except Exception as e:
# return jsonify({'error': str(e)}), 500
# if __name__ == '__main__':
# app.run(host="0.0.0.0", port=7860)
from flask import Flask, request, jsonify
from flask_cors import CORS
import numpy as np
import pandas as pd
import joblib
import tensorflow as tf # Required for TFLite interpreter
# Local file paths
# Model artifacts are expected alongside this script (bundled with the Space);
# startup fails fast with FileNotFoundError if any are missing.
tflite_model_path = "motion_classification_model.tflite"
scaler_path = "scaler.pkl"
label_encoder_path = "label_encoder.pkl"
# Load the scaler and label encoder
# NOTE(review): joblib.load unpickles arbitrary code — only load trusted files.
scaler = joblib.load(scaler_path)
label_encoder = joblib.load(label_encoder_path)
# Initialize the TFLite interpreter
interpreter = tf.lite.Interpreter(model_path=tflite_model_path)
interpreter.allocate_tensors()
# Get input and output tensor details (index handles used by set_tensor/get_tensor)
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Feature Columns: the six raw sensor channels the model was trained on
feature_columns = ['AccX', 'AccY', 'AccZ', 'GyroX', 'GyroY', 'GyroZ']
# Number of rows per model input window; presumably matches the model's
# training window size — confirm against the training pipeline.
sequence_length = 50
# Initialize Flask App (CORS enabled for all origins so browser clients can call it)
app = Flask(__name__)
CORS(app)
@app.route('/')
def home():
    """Landing route; doubles as a simple liveness check."""
    greeting = "Welcome to driving behavior analysis API"
    return greeting
@app.route('/predict', methods=['POST'])
def predict_behavior():
    """Classify driving behavior from raw accelerometer/gyroscope samples.

    Expects a JSON body convertible to a DataFrame containing the six
    columns in ``feature_columns``. Returns per-window class labels, the
    majority-vote class, and harsh braking / cornering event counts.

    Returns:
        200 JSON with keys ``predicted_classes``, ``most_frequent_class``,
        ``harsh_braking_count``, ``harsh_cornering_count``;
        400 on missing/invalid payload; 500 on unexpected errors.
    """
    try:
        # silent=True returns None instead of raising on a bad/absent JSON
        # body, so we can answer with an explicit 400 rather than a 500.
        data = request.get_json(silent=True)
        if not data:
            return jsonify({'error': 'No JSON payload provided'}), 400
        df = pd.DataFrame(data)
        # Validate required columns
        if not all(col in df.columns for col in feature_columns):
            return jsonify({'error': 'Missing required sensor columns'}), 400
        # Scale the data with the scaler fit at training time
        df[feature_columns] = scaler.transform(df[feature_columns])
        # Compute jerk (rate of change of acceleration) on the scaled signal
        df['JerkX'] = df['AccX'].diff().fillna(0)
        df['JerkY'] = df['AccY'].diff().fillna(0)
        df['JerkZ'] = df['AccZ'].diff().fillna(0)
        # Identify harsh braking (sudden drop in AccX); threshold tuned from -5 to -3
        harsh_braking_events = (df['JerkX'] < -3).sum()
        # Identify harsh cornering (sharp change in GyroZ); threshold tuned to 1.8
        harsh_cornering_events = (df['GyroZ'].diff().abs() > 1.8).sum()
        # Build overlapping windows of `sequence_length` rows for the model
        sequences = []
        if len(df) >= sequence_length:
            for i in range(len(df) - sequence_length + 1):
                seq = df.iloc[i:i + sequence_length][feature_columns].values
                sequences.append(seq)
        else:
            # Zero-pad at the front when fewer rows than one full window
            padded_data = np.pad(
                df[feature_columns].values,
                ((sequence_length - len(df), 0), (0, 0)),
                mode='constant',
                constant_values=0
            )
            sequences.append(padded_data)
        # Convert to NumPy array with the dtype the TFLite input expects
        X_input = np.array(sequences, dtype=np.float32)
        # Run each window through the TFLite interpreter (batch size 1);
        # pass a proper ndarray, not a Python list, to set_tensor.
        predictions = []
        for seq in X_input:
            interpreter.set_tensor(input_details[0]['index'],
                                   np.expand_dims(seq, axis=0))
            interpreter.invoke()
            output_data = interpreter.get_tensor(output_details[0]['index'])
            predictions.append(output_data)
        # predictions stacks to (n_windows, 1, n_classes); argmax over classes
        predicted_classes = np.argmax(predictions, axis=2).flatten()
        class_labels = label_encoder.inverse_transform(predicted_classes)
        # Majority vote across windows (ties resolve to np.unique's sorted order)
        unique_classes, counts = np.unique(class_labels, return_counts=True)
        most_frequent_class = unique_classes[np.argmax(counts)]
        # Cast NumPy scalars/strings to native Python types so jsonify
        # can serialize them (np.str_/np.int64 are not JSON serializable).
        return jsonify({
            "predicted_classes": [str(label) for label in class_labels],
            "most_frequent_class": str(most_frequent_class),
            "harsh_braking_count": int(harsh_braking_events),
            "harsh_cornering_count": int(harsh_cornering_events)
        })
    except Exception as e:
        # Surface unexpected failures as a 500 with the message for debugging
        return jsonify({'error': str(e)}), 500
# Start the Flask development server, bound to all interfaces.
# NOTE(review): port 7860 matches the Hugging Face Spaces convention — confirm
# the deployment target before changing it.
if __name__ == '__main__':
    app.run(host="0.0.0.0", port=7860)