File size: 2,651 Bytes
d19283f
 
 
 
 
 
 
 
 
5c2ea4b
d19283f
 
 
1e67d7e
e3065a2
1e67d7e
d19283f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b0b04a6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1e67d7e
d19283f
 
 
cce15fc
 
d19283f
 
cce15fc
 
5c2ea4b
cce15fc
 
5c2ea4b
cce15fc
 
 
 
d19283f
65afcb7
 
 
 
 
cce15fc
65afcb7
d19283f
 
1e67d7e
 
 
 
 
 
 
 
 
 
 
6fb851a
1e67d7e
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
from fastapi import FastAPI
from pydantic import BaseModel
import joblib
import pandas as pd
import json

# Preprocessing/clustering artifacts for the /predictGMM pipeline
# (StandardScaler -> PCA -> GaussianMixture), loaded from the working
# directory at import time so the service fails fast on startup.
scaler = joblib.load("scaler.joblib")
gmm = joblib.load("gmm_model.joblib")
pca = joblib.load("pca.joblib")
# Maps cluster index (string key) -> fatigue level. Values are assumed
# numeric: /predictGMM multiplies them by cluster probabilities.
with open("cluster_fatigue_map.json") as f:
    cluster_fatigue_map = json.load(f)

# Supervised regressor backing the /predict endpoint.
model = joblib.load("fatigue_model.joblib")

# Column order the models were fitted with — handlers reindex incoming
# data by this list before calling transform/predict.
feature_cols = [
    "AVRR", "SDNN", "RMSSD", "PNN50", "Coefficient_of_Variation",
    "Age", "Weight", "Height"
]

# Request schema shared by all endpoints. Field names must match
# `feature_cols` exactly: handlers build a one-row DataFrame from this
# model and select columns by those names.
class FatigueInput(BaseModel):
    """HRV metrics plus basic anthropometrics for one prediction request."""
    AVRR: float  # average RR interval
    SDNN: float  # std. dev. of NN intervals
    RMSSD: float  # root mean square of successive differences
    PNN50: float  # proportion of NN50 intervals
    Coefficient_of_Variation: float
    Age: int
    Weight: float
    Height: float

# FastAPI application exposing /debug, /predictGMM and /predict.
app = FastAPI()

@app.post("/debug")
def debug_predict(input_data: FatigueInput):
    """Expose each preprocessing stage for a single sample.

    Returns the raw payload, its scaled and PCA-projected forms, and the
    fitted scaler parameters, so client inputs can be sanity-checked
    against the training-time preprocessing.
    """
    payload = input_data.dict()
    # Column selection enforces the training-time feature order.
    frame = pd.DataFrame([payload])[feature_cols]
    scaled = scaler.transform(frame)
    projected = pca.transform(scaled)

    return {
        "raw_input": payload,
        "scaled_input": scaled.tolist(),
        "pca_input": projected.tolist(),
        "scaler_mean": scaler.mean_.tolist(),
        "scaler_scale": scaler.scale_.tolist(),
    }

@app.post("/predictGMM")
def predict_gmm(input_data: FatigueInput):
    """Assign a sample to a GMM cluster and map it to a fatigue level.

    Pipeline: scale -> PCA -> GMM. Returns the hard cluster assignment,
    its mapped fatigue level, the full per-cluster probability vector,
    and a probability-weighted fatigue score.

    NOTE(fix): renamed from `predict` — the /predict handler defined
    later in this module reused that name, shadowing this function at
    module level (F811). Route registration happened at decoration time
    so the endpoint still worked, but the collision was a latent bug.
    The route path is unchanged, so HTTP clients are unaffected.
    """
    try:
        input_dict = input_data.dict()
        print("INPUT:", input_dict)

        # Reorder columns to the training order before transforming.
        input_df = pd.DataFrame([input_dict])[feature_cols]
        scaled_input = scaler.transform(input_df)
        print("SCALED:", scaled_input)

        pca_input = pca.transform(scaled_input)
        print("PCA:", pca_input)

        cluster = gmm.predict(pca_input)[0]
        probs = gmm.predict_proba(pca_input)[0].astype(float)
        print("CLUSTER:", cluster)
        print("PROBS:", probs)

        # JSON map is keyed by stringified cluster index.
        fatigue_level = cluster_fatigue_map[str(cluster)]

        return {
            "cluster": int(cluster),
            "fatigue_level": fatigue_level,
            "cluster_probabilities": {str(i): float(prob) for i, prob in enumerate(probs)},
            # Expected fatigue under the full posterior, not just the argmax cluster.
            "weighted_fatigue_score": sum(float(probs[i]) * cluster_fatigue_map[str(i)] for i in range(len(probs)))
        }
    except Exception as e:
        # Errors are returned as a 200 with an "error" payload; kept for
        # backward compatibility with existing clients.
        return {"error": str(e)}

@app.post("/predict")
def predict(input_data: FatigueInput):
    """Predict a continuous fatigue level with the trained regressor.

    Returns both the raw float prediction and a rounded integer form;
    failures are reported as a 200 with an "error" payload.
    """
    try:
        # One-row frame, columns forced into the training order.
        features = pd.DataFrame([input_data.dict()])[feature_cols]
        fatigue = model.predict(features)[0]
        return {
            "predicted_fatigue_level": float(fatigue),
            "fatigueLevel": int(round(fatigue)),
        }
    except Exception as e:
        return {"error": str(e)}