mistermprah committed on
Commit
b10f2fc
·
verified ·
1 Parent(s): 4783bb7

Upload 13 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ artifact.wav filter=lfs diff=lfs merge=lfs -text
37
+ extra_hystole.wav filter=lfs diff=lfs merge=lfs -text
38
+ FYP_Model filter=lfs diff=lfs merge=lfs -text
39
+ model.safetensors.crdownload filter=lfs diff=lfs merge=lfs -text
FYP_Model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:efd8f2304cff40ac674bf246b58682ef1e617f9e66a11372efffdf90b7ffe99e
3
+ size 57901174
app.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from transformers import pipeline
3
+ import torchaudio
4
+ from config import MODEL_ID
5
+
6
+ # Load the model and pipeline using the model_id variable
7
+ pipe = pipeline("audio-classification", model=MODEL_ID)
8
+
9
+ def classify_audio(filepath):
10
+ preds = pipe(filepath)
11
+ outputs = {"normal": 0.0, "artifact": 0.0, "murmur": 0.0}
12
+ for p in preds:
13
+ label = p["label"]
14
+ # Simplify the labels and accumulate the scores
15
+ if "artifact" in label:
16
+ outputs["artifact"] += p["score"]
17
+ elif "murmur" in label:
18
+ outputs["murmur"] += p["score"]
19
+ elif "extra" in label or "Normal" in label:
20
+ outputs["normal"] += p["score"]
21
+ return outputs
22
+
23
+ # Streamlit app layout
24
+ st.title("Heartbeat Sound Classification")
25
+
26
+ # Theme selection
27
+ theme = st.sidebar.selectbox(
28
+ "Select Theme",
29
+ ["Light Green", "Light Blue"]
30
+ )
31
+
32
+ # Add custom CSS for styling based on the selected theme
33
+ if theme == "Light Green":
34
+ st.markdown(
35
+ """
36
+ <style>
37
+ body, .stApp {
38
+ background-color: #e8f5e9; /* Light green background */
39
+ }
40
+ .stApp {
41
+ color: #004d40; /* Dark green text */
42
+ }
43
+ .stButton > button, .stFileUpload > div {
44
+ background-color: #004d40; /* Dark green button and file uploader background */
45
+ color: white; /* White text */
46
+ }
47
+ .stButton > button:hover, .stFileUpload > div:hover {
48
+ background-color: #00332c; /* Darker green on hover */
49
+ }
50
+ </style>
51
+ """,
52
+ unsafe_allow_html=True
53
+ )
54
+ elif theme == "Light Blue":
55
+ st.markdown(
56
+ """
57
+ <style>
58
+ body, .stApp {
59
+ background-color: #e0f7fa; /* Light blue background */
60
+ }
61
+ .stApp {
62
+ color: #006064; /* Dark blue text */
63
+ }
64
+ .stButton > button, .stFileUpload > div {
65
+ background-color: #006064; /* Dark blue button and file uploader background */
66
+ color: white; /* White text */
67
+ }
68
+ .stButton > button:hover, .stFileUpload > div:hover {
69
+ background-color: #004d40; /* Darker blue on hover */
70
+ }
71
+ </style>
72
+ """,
73
+ unsafe_allow_html=True
74
+ )
75
+
76
+
77
+ # File uploader for audio files
78
+ uploaded_file = st.file_uploader("Upload an audio file", type=["wav", "mp3"])
79
+
80
+ if uploaded_file is not None:
81
+ st.subheader("Uploaded Audio File")
82
+ # Load and display the audio file
83
+ audio_bytes = uploaded_file.read()
84
+ st.audio(audio_bytes, format='audio/wav')
85
+
86
+ # Save the uploaded file to a temporary location
87
+ with open("temp_audio_file.wav", "wb") as f:
88
+ f.write(audio_bytes)
89
+
90
+ # Classify the audio file
91
+ st.write("Classifying the audio...")
92
+ results = classify_audio("temp_audio_file.wav")
93
+
94
+ # Display the classification results in a dedicated output box
95
+ st.subheader("Classification Results")
96
+ results_box = st.empty()
97
+ results_str = "\n".join([f"{label}: {score:.2f}" for label, score in results.items()])
98
+ results_box.text(results_str)
99
+
100
+ # Sample Audio Files for classification
101
+ st.write("Sample Audio Files:")
102
+ examples = ['normal.wav', 'murmur.wav', 'extra_systole.wav', 'extra_hystole.wav', 'artifact.wav']
103
+ for example in examples:
104
+ if st.button(example):
105
+ st.subheader(f"Sample Audio: {example}")
106
+ audio_bytes = open(example, 'rb').read()
107
+ st.audio(audio_bytes, format='audio/wav')
108
+ results = classify_audio(example)
109
+ st.write("Results:")
110
+ results_str = "\n".join([f"{label}: {score:.2f}" for label, score in results.items()])
111
+ st.text(results_str)
artifact.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f3dac4681a1f493576bf6855b6f621aeabdb71ed9e1fd8cc20023c450f51711b
3
+ size 793878
config.json ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "ntu-spml/distilhubert",
3
+ "activation_dropout": 0.1,
4
+ "apply_spec_augment": false,
5
+ "architectures": [
6
+ "HubertForSequenceClassification"
7
+ ],
8
+ "attention_dropout": 0.1,
9
+ "bos_token_id": 1,
10
+ "classifier_proj_size": 256,
11
+ "conv_bias": false,
12
+ "conv_dim": [
13
+ 512,
14
+ 512,
15
+ 512,
16
+ 512,
17
+ 512,
18
+ 512,
19
+ 512
20
+ ],
21
+ "conv_kernel": [
22
+ 10,
23
+ 3,
24
+ 3,
25
+ 3,
26
+ 3,
27
+ 2,
28
+ 2
29
+ ],
30
+ "conv_stride": [
31
+ 5,
32
+ 2,
33
+ 2,
34
+ 2,
35
+ 2,
36
+ 2,
37
+ 2
38
+ ],
39
+ "ctc_loss_reduction": "sum",
40
+ "ctc_zero_infinity": false,
41
+ "do_stable_layer_norm": false,
42
+ "eos_token_id": 2,
43
+ "feat_extract_activation": "gelu",
44
+ "feat_extract_norm": "group",
45
+ "feat_proj_dropout": 0.0,
46
+ "feat_proj_layer_norm": false,
47
+ "final_dropout": 0.0,
48
+ "hidden_act": "gelu",
49
+ "hidden_dropout": 0.1,
50
+ "hidden_size": 768,
51
+ "id2label": {
52
+ "0": "Atraining_artifact_aug",
53
+ "1": "Atraining_extrahls_aug",
54
+ "2": "Btraining_extrasystole_aug",
55
+ "3": "Btraining_murmur_aug",
56
+ "4": "Normal_aug"
57
+ },
58
+ "initializer_range": 0.02,
59
+ "intermediate_size": 3072,
60
+ "label2id": {
61
+ "Atraining_artifact_aug": "0",
62
+ "Atraining_extrahls_aug": "1",
63
+ "Btraining_extrasystole_aug": "2",
64
+ "Btraining_murmur_aug": "3",
65
+ "Normal_aug": "4"
66
+ },
67
+ "layer_norm_eps": 1e-05,
68
+ "layerdrop": 0.0,
69
+ "mask_feature_length": 10,
70
+ "mask_feature_min_masks": 0,
71
+ "mask_feature_prob": 0.0,
72
+ "mask_time_length": 10,
73
+ "mask_time_min_masks": 2,
74
+ "mask_time_prob": 0.05,
75
+ "model_type": "hubert",
76
+ "num_attention_heads": 12,
77
+ "num_conv_pos_embedding_groups": 16,
78
+ "num_conv_pos_embeddings": 128,
79
+ "num_feat_extract_layers": 7,
80
+ "num_hidden_layers": 2,
81
+ "pad_token_id": 0,
82
+ "torch_dtype": "float32",
83
+ "transformers_version": "4.38.2",
84
+ "use_weighted_layer_sum": false,
85
+ "vocab_size": 32
86
+ }
config.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # config.py
2
+ MODEL_ID = "arham061/distilhubert-finetuned-PASCAL_Dataset_Augmented"
events.out.tfevents.1710064702.c9211bb32db6.1776.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2908d1146b5efb8e524f6c7338fff5015aca93c816ccb9d7a83449cd35631699
3
+ size 71812
extra_hystole.wav ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:277eede8e722df5d8a013a7d919618e39c3b0f3f3a00ddd843b20cd960b6baa5
3
+ size 793878
extra_systole.wav ADDED
Binary file (47.9 kB). View file
 
model.safetensors.crdownload ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7f78f8ea5438daf850dd513a2b6f3775fc45641702fea46ff18ee05ea76a3ad6
3
+ size 62000965
murmur.wav ADDED
Binary file (42.4 kB). View file
 
normal.wav ADDED
Binary file (36.7 kB). View file
 
preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "do_normalize": true,
3
+ "feature_extractor_type": "Wav2Vec2FeatureExtractor",
4
+ "feature_size": 1,
5
+ "padding_side": "right",
6
+ "padding_value": 0,
7
+ "return_attention_mask": true,
8
+ "sampling_rate": 16000
9
+ }
requirements (1).txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ streamlit
2
+ transformers
3
+ torch
4
+ torchaudio