satish99017 commited on
Commit
e1f9906
·
1 Parent(s): 0b01cbe

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +60 -56
app.py CHANGED
@@ -2,60 +2,64 @@ import streamlit as st
2
  from transformers import pipeline
3
 
4
 
5
- model = pipeline("sentiment-analysis")
6
- st.title("Hugging Face Model Demo")
7
- input_text = st.text_input("Enter your text", "")
8
- if st.button("Analyze"):
9
- # Perform inference using the loaded model
10
- result = model(input_text)
11
- st.write("Prediction:", result[0]['label'], "| Score:", result[0]['score'])
12
-
13
- ###
14
- # def recording_storing():
15
- # chunk = 1024 # Record in chunks of 1024 samples
16
- # sample_format = pyaudio.paInt16 # 16 bits per sample
17
- # channels = 1
18
- # fs = 44100 # Record at 44100 samples per second
19
- # seconds = 4
20
- # # filename = "output.wav"
21
-
22
- # p = pyaudio.PyAudio() # Create an interface to PortAudio
23
-
24
- # print('Recording')
25
-
26
- # stream = p.open(format=sample_format,
27
- # channels=channels,
28
- # rate=fs,
29
- # frames_per_buffer=chunk,
30
- # input=True)
31
-
32
- # frames = [] # Initialize array to store frames
33
-
34
- # # Store data in chunks for 3 seconds
35
- # for i in range(0, int(fs / chunk * seconds)):
36
- # data = stream.read(chunk)
37
- # frames.append(data)
38
-
39
- # # Stop and close the stream
40
- # stream.stop_stream()
41
- # stream.close()
42
- # # Terminate the PortAudio interface
43
- # p.terminate()
44
-
45
- # print('Finished recording')
46
-
47
- # # Save the recorded data as a WAV file
48
- # wf = wave.open(filename, 'wb')
49
- # wf.setnchannels(channels)
50
- # wf.setsampwidth(p.get_sample_size(sample_format))
51
- # wf.setframerate(fs)
52
- # wf.writeframes(b''.join(frames))
53
- # wf.close()
54
-
55
-
56
-
57
- # return "recorded"
58
-
59
- # audio = recording_storing()
60
- # print(audio)
 
 
 
 
61
 
 
2
  from transformers import pipeline
3
 
4
 
5
import gradio as gr
import wavio
from scipy.io.wavfile import write

# File paths: the raw microphone capture and the band-pass-filtered output.
input_file = 'recorded.wav'
output_file = 'output_filtered_receiver.wav'

# Signal parameters for the receiver. The transmission band sits in the
# near-ultrasonic 18-19 kHz range at the standard 44.1 kHz sample rate.
low_frequency = 18000
high_frequency = 19000
bit_duration = 0.007
sample_rate = 44100
amplitude_scaling_factor = 10.0
16
+
17
def record(audio):
    """Persist a captured audio clip to disk and run the receiver filter.

    Parameters:
        audio (tuple): A (sample_rate, data) pair as produced by a
            Gradio Audio component.

    Returns:
        str: A success message when the clip is stored and filtered,
            otherwise an "Error: ..." string describing the failure.
    """
    try:
        # Gradio delivers audio as a (sample_rate, data) pair; anything
        # else is reported back to the UI as malformed input.
        if len(audio) != 2:
            return f"Error: Expected a tuple with 2 elements, but got {len(audio)}"

        rate, samples = audio

        # Persist the clip so the filtering stage can read it back from disk.
        wavio.write("recorded.wav", samples, rate)

        # NOTE(review): `filtered` is not defined anywhere in this file —
        # presumably the bandpass-filter helper lives elsewhere; confirm it
        # is in scope, otherwise this raises NameError (caught below and
        # returned as an error string).
        filtered()

        return f"Audio receive correctly"
    except Exception as e:
        # Surface any failure to the caller/UI as a plain error string
        # rather than crashing the app.
        return f"Error: {str(e)}"
44
+
45
# Build the Gradio UI. The original referenced `input_audio` and
# `output_text` in the click handler without ever defining them, which
# raises NameError at startup — both components are created here.
with gr.Blocks() as demo:
    # Microphone input delivered to `record` as a (sample_rate, data)
    # tuple. NOTE(review): keyword is `source=` on gradio 3.x but
    # `sources=["microphone"]` on 4.x — confirm the installed version.
    input_audio = gr.Audio(source="microphone", type="numpy")
    # Textbox that displays the status string returned by `record`.
    output_text = gr.Textbox(label="status")
    btn_record = gr.Button(value="record")
    btn_record.click(fn=record, inputs=input_audio, outputs=output_text)


demo.launch()
54
+ ######################## models
55
+ # model = pipeline("sentiment-analysis")
56
+ # st.title("Hugging Face Model Demo")
57
+ # input_text = st.text_input("Enter your text", "")
58
+ # if st.button("Analyze"):
59
+ # # Perform inference using the loaded model
60
+ # result = model(input_text)
61
+ # st.write("Prediction:", result[0]['label'], "| Score:", result[0]['score'])
62
+
63
+
64
+
65