leojoseph27 committed on
Commit
e35abe8
·
1 Parent(s): 41df22a

Add ECG anomaly detection model files

Browse files
best_model.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7b2a8ed391e3151a21bc6ea7ba261b1cd81a3bcfbdb1ee538b47aff2e0ad8c09
3
+ size 30876194
detecting_anomaly_in_ecg_data_using_autoencoder_with_pytorch.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ import numpy as np
4
+
5
class Encoder(nn.Module):
    """Two-layer LSTM encoder for fixed-length ECG windows.

    Maps an input of shape (batch, seq_len, n_features) to a hidden-state
    sequence of shape (batch, seq_len, hidden_size), and also returns the
    final (hidden, cell) state of the second LSTM so the decoder can be
    initialized from it.

    Args:
        seq_len: expected sequence length of each input window.
        n_features: number of features per time step.
        hidden_size: LSTM hidden dimension (default 512).
    """

    def __init__(self, seq_len, n_features, hidden_size=512):
        super(Encoder, self).__init__()
        self.seq_len = seq_len
        self.n_features = n_features
        self.hidden_size = hidden_size

        # First LSTM lifts the raw features into the hidden dimension.
        self.rnn1 = nn.LSTM(
            input_size=n_features,
            hidden_size=hidden_size,
            num_layers=1,
            batch_first=True,
        )

        # Second LSTM refines the hidden sequence at the same width.
        self.rnn2 = nn.LSTM(
            input_size=hidden_size,
            hidden_size=hidden_size,
            num_layers=1,
            batch_first=True,
        )

    def forward(self, x):
        """Encode `x`; accepts (batch, seq_len) or (batch, seq_len, n_features).

        Returns:
            (output, (hidden_n, cell_n)) where output has shape
            (batch, seq_len, hidden_size).
        """
        # Normalize the input to (batch, seq_len, n_features).
        if x.dim() == 2:  # (batch, seq_len): add a trailing feature axis
            x = x.unsqueeze(-1)
        elif x.dim() == 3:
            if x.shape[1] != self.seq_len or x.shape[2] != self.n_features:
                # NOTE(review): a blind reshape can silently scramble data when
                # the element count happens to match but the layout differs —
                # confirm callers always pass compatible tensors.
                x = x.reshape(x.shape[0], self.seq_len, self.n_features)

        # Debug prints removed from the original: they flooded stdout on every
        # forward pass (training and inference alike).
        x, _ = self.rnn1(x)
        x, (hidden_n, cell_n) = self.rnn2(x)

        return x, (hidden_n, cell_n)
44
+
45
class Decoder(nn.Module):
    """Two-layer LSTM decoder that reconstructs the input sequence.

    Consumes the encoder's hidden-state sequence (batch, seq_len, hidden_size)
    plus the encoder's final (hidden, cell) state, and projects each time step
    back to n_features via a linear output layer.

    Args:
        seq_len: expected sequence length (stored for reference).
        n_features: number of output features per time step.
        hidden_size: LSTM hidden dimension; must match the encoder's.
    """

    def __init__(self, seq_len, n_features, hidden_size=512):
        super(Decoder, self).__init__()
        self.seq_len = seq_len
        self.n_features = n_features
        self.hidden_size = hidden_size

        self.rnn1 = nn.LSTM(
            input_size=hidden_size,
            hidden_size=hidden_size,
            num_layers=1,
            batch_first=True,
        )

        self.rnn2 = nn.LSTM(
            input_size=hidden_size,
            hidden_size=hidden_size,
            num_layers=1,
            batch_first=True,
        )

        # Projects each hidden state back to the original feature dimension.
        self.output_layer = nn.Linear(hidden_size, n_features)

    def forward(self, x, hidden):
        """Decode `x` of shape (batch, seq_len, hidden_size).

        Args:
            x: encoder output sequence.
            hidden: (h_n, c_n) tuple used to initialize the first LSTM.

        Returns:
            Reconstruction of shape (batch, seq_len, n_features).
        """
        # Debug prints removed from the original: they fired on every forward
        # pass and are not appropriate outside ad-hoc debugging.
        x, _ = self.rnn1(x, hidden)
        x, _ = self.rnn2(x)

        return self.output_layer(x)
77
+
78
class Autoencoder(nn.Module):
    """LSTM autoencoder: Encoder -> Decoder reconstruction pipeline.

    Args:
        seq_len: sequence length of each input window.
        n_features: number of features per time step.
        hidden_size: LSTM hidden dimension shared by encoder and decoder.
            New, backward-compatible parameter: the original hard-coded the
            sub-modules' default of 512 by never forwarding a value.
    """

    def __init__(self, seq_len, n_features, hidden_size=512):
        super(Autoencoder, self).__init__()
        # Forward hidden_size so the model width is actually configurable;
        # the default (512) preserves the original behavior.
        self.encoder = Encoder(seq_len, n_features, hidden_size)
        self.decoder = Decoder(seq_len, n_features, hidden_size)

    def forward(self, x):
        """Encode then decode `x`; returns the reconstruction."""
        x, hidden = self.encoder(x)
        x = self.decoder(x, hidden)
        return x
88
+
89
def create_dataset(df):
    """Turn a DataFrame into a numpy array suitable for windowing.

    If the row count is not a multiple of the feature count, trailing rows
    are dropped until it is, and a warning is printed.

    Returns:
        (sequence, n_rows, n_features) — the (possibly trimmed) array and
        its dimensions.
    """
    sequence = df.values
    n_rows, n_features = sequence.shape
    print(f"Dataset shape: {sequence.shape}")

    remainder = n_rows % n_features
    if remainder:
        print(f"Warning: Number of rows ({n_rows}) is not divisible by number of features ({n_features})")
        # Drop the trailing rows that don't fill a complete group.
        n_rows -= remainder
        sequence = sequence[:n_rows]
        print(f"Adjusted dataset shape: {sequence.shape}")

    return sequence, n_rows, n_features