yetog commited on
Commit
c33adea
·
verified ·
1 Parent(s): 44764c7

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +72 -0
app.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from huggingface_hub import InferenceClient
3
+
4
def transcribe_audio(audio_file_path):
    """Run speech-to-text on a local audio file via an Inference Provider.

    Args:
        audio_file_path: Filesystem path to the audio file; the client
            handles reading the file itself, so no bytes are loaded here.

    Returns:
        The transcript text produced by ``openai/whisper-large-v3``.
    """
    asr_client = InferenceClient(provider="auto")

    # The provider is auto-selected; only the path and model are supplied.
    result = asr_client.automatic_speech_recognition(
        audio=audio_file_path,
        model="openai/whisper-large-v3",
    )

    return result.text
14
+
15
def generate_summary(transcript):
    """Summarize a meeting transcript with a chat model.

    Args:
        transcript: Plain-text transcript to analyze.

    Returns:
        Model-generated markdown with Summary / Action Items / Decisions
        sections, as requested by the prompt.
    """
    chat_client = InferenceClient(provider="auto")

    # Instructions and transcript are combined into a single user turn.
    prompt = f"""
    Analyze this meeting transcript and provide:
    1. A concise summary of key points
    2. Action items with responsible parties
    3. Important decisions made

    Transcript: {transcript}

    Format with clear sections:
    ## Summary
    ## Action Items
    ## Decisions Made
    """

    completion = chat_client.chat.completions.create(
        model="deepseek-ai/DeepSeek-R1-0528",
        messages=[{"role": "user", "content": prompt}],
        max_tokens=1000,
    )

    return completion.choices[0].message.content
40
+
41
def process_meeting_audio(audio_file):
    """Transcribe an uploaded recording, then summarize the transcript.

    Args:
        audio_file: Path to the uploaded audio, or None if nothing was
            uploaded.

    Returns:
        A (transcript, summary) pair of strings. When no file is given or
        an AI call fails, the first element carries the message and the
        second is empty.
    """
    if audio_file is None:
        return "Please upload an audio file.", ""

    try:
        # Step 1: speech-to-text, then Step 2: summarization of the text.
        text = transcribe_audio(audio_file)
        notes = generate_summary(text)
    except Exception as e:
        # UI boundary: surface provider/network failures to the user
        # instead of crashing the app.
        return f"Error processing audio: {str(e)}", ""

    return text, notes
58
+
59
# Build the Gradio UI: one audio input, two text outputs.
transcript_box = gr.Textbox(label="Transcript", lines=10)
summary_box = gr.Textbox(label="Summary & Action Items", lines=8)

app = gr.Interface(
    fn=process_meeting_audio,
    inputs=gr.Audio(label="Upload Meeting Audio", type="filepath"),
    outputs=[transcript_box, summary_box],
    title="🎤 AI Meeting Notes",
    description="Upload audio to get instant transcripts and summaries.",
)

if __name__ == "__main__":
    # Launch only when run as a script (e.g. `python app.py`).
    app.launch()