import gradio as gr
import pandas as pd
import os
import json
import tempfile
from openai import OpenAI
# Initialize OpenAI client
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
# -------- Speech to Text --------
def transcribe_audio(file_path):
    """Transcribe the audio file at *file_path* via OpenAI Whisper.

    Returns the recognized speech as plain text.
    """
    with open(file_path, "rb") as audio_stream:
        result = client.audio.transcriptions.create(
            model="whisper-1",
            file=audio_stream,
        )
    return result.text
# -------- Extract CRM Fields (SAFE JSON) --------
def _empty_crm_record():
    # Fallback payload used whenever the model reply cannot be parsed.
    return {
        "Name": "",
        "Phone": "",
        "Product": "",
        "Budget": "",
        "Location": "",
        "Intent": "",
    }


def _parse_crm_json(content):
    # Parse the model reply into a CRM dict, tolerating common deviations.
    cleaned = (content or "").strip()
    # Chat models sometimes wrap the JSON in markdown code fences despite
    # being told to return only JSON; strip them before parsing.
    if cleaned.startswith("```"):
        cleaned = cleaned.strip("`").strip()
        if cleaned.lower().startswith("json"):
            cleaned = cleaned[4:]
    try:
        data = json.loads(cleaned)
    except json.JSONDecodeError:
        # Narrow except: only a malformed reply should trigger the fallback,
        # not unrelated errors (the original bare `except:` hid everything).
        return _empty_crm_record()
    # A valid-JSON reply could still be a list/string; callers expect a dict.
    return data if isinstance(data, dict) else _empty_crm_record()


def extract_fields(text):
    """Extract CRM fields (Name, Phone, Product, Budget, Location, Intent)
    from a conversation transcript using the chat model.

    Returns a dict with those six keys; values are empty strings when the
    model's reply is not usable JSON.
    """
    prompt = f"""
Extract the following fields from the conversation:
Name, Phone, Product, Budget, Location, Intent.
Return ONLY valid JSON like:
{{
"Name": "",
"Phone": "",
"Product": "",
"Budget": "",
"Location": "",
"Intent": ""
}}
Conversation:
{text}
"""
    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": prompt}],
        temperature=0,  # deterministic extraction, no creative variation
    )
    return _parse_crm_json(response.choices[0].message.content)
# -------- Main Processing --------
def process_audio(audio_file):
    """Gradio callback: transcribe an audio file, extract CRM fields, and
    export them to an Excel file.

    Returns a 3-tuple of (transcript text, DataFrame of CRM fields, path to
    the generated .xlsx) — or a message, empty DataFrame, and None on
    missing input or failure.
    """
    if audio_file is None:
        return "No audio provided", pd.DataFrame(), None
    try:
        # Gradio passes a filesystem path (type="filepath").
        text = transcribe_audio(audio_file)
        data = extract_fields(text)
        df = pd.DataFrame([data])
        # Unique temp file per request: the previous fixed name in the shared
        # temp dir was clobbered when several sessions ran concurrently.
        with tempfile.NamedTemporaryFile(
            suffix=".xlsx", prefix="crm_output_", delete=False
        ) as tmp:
            excel_path = tmp.name
        df.to_excel(excel_path, index=False)
        return text, df, excel_path
    except Exception as e:
        # Top-level UI boundary: surface the failure in the transcript box
        # instead of crashing the app.
        return f"Error: {str(e)}", pd.DataFrame(), None
# -------- UI --------
# -------- UI --------
with gr.Blocks() as app:
    gr.Markdown("# ποΈ AI Voice to CRM Auto Filler")

    with gr.Tabs():
        # Tab 1: record a new inquiry with the microphone
        with gr.Tab("π€ Record Inquiry"):
            recorder = gr.Audio(
                sources=["microphone"],
                type="filepath",
                label="Record Audio",
            )
            record_btn = gr.Button("Process Recording")

        # Tab 2: upload an existing recording
        with gr.Tab("π Upload Voice"):
            uploader = gr.Audio(
                sources=["upload"],
                type="filepath",
                label="Upload Audio File",
            )
            upload_btn = gr.Button("Process File")

    # Shared output widgets below the tabs
    transcript_box = gr.Textbox(label="Transcription")
    crm_table = gr.Dataframe(label="Extracted CRM Data")
    excel_file = gr.File(label="Download Excel")
    shared_outputs = [transcript_box, crm_table, excel_file]

    # Both buttons route through the same processing pipeline
    record_btn.click(fn=process_audio, inputs=recorder, outputs=shared_outputs)
    upload_btn.click(fn=process_audio, inputs=uploader, outputs=shared_outputs)

# Launch
app.launch()