# Hugging Face Space: Interview Helper
# NOTE: the Space page showed a "Runtime error" status at scrape time.
# --- Imports & API client setup ---
# Standard library
import io
import os

# Third-party
import gradio as gr
import numpy as np
from dotenv import load_dotenv
from groq import Groq
from scipy.io import wavfile

# Load environment variables from a local .env file (expects GROQ_API_KEY).
load_dotenv()

# Initialize the Groq client.
# NOTE(review): if GROQ_API_KEY is unset, api_key is None and the Groq
# client will fail — confirm whether it raises here or at first request.
api_key = os.getenv("GROQ_API_KEY")
client = Groq(api_key=api_key)
def transcribe(audio):
    """Transcribe a recorded audio clip using Groq's Whisper API.

    Parameters
    ----------
    audio : tuple[int, np.ndarray]
        Gradio ``type="numpy"`` audio payload: (sample_rate, samples).
        Samples may be integer PCM or float, mono or multi-channel.

    Returns
    -------
    str
        The transcription text (``response_format="text"``).
    """
    sr, y = audio

    # Downmix multi-channel audio to mono by averaging channels.
    if y.ndim > 1:
        y = y.mean(axis=1)

    # Peak-normalize to [-1, 1] float32. Guard against all-zero (silent)
    # input: the original unconditional division produced NaNs via 0/0.
    y = y.astype(np.float32)
    peak = np.max(np.abs(y))
    if peak > 0:
        y /= peak

    # Serialize to an in-memory WAV (float32 samples) for upload.
    bytes_io = io.BytesIO()
    wavfile.write(bytes_io, sr, y)
    bytes_io.seek(0)

    # Groq's Whisper endpoint accepts a (filename, file-like) tuple.
    transcription = client.audio.transcriptions.create(
        file=("audio.wav", bytes_io),
        model="whisper-large-v3",
        response_format="text",
    )
    return transcription
def generate_response(prompt, system_prompt):
    """Generate a chat completion for *prompt* under *system_prompt*.

    Sends a two-message conversation (system + user) to the Llama model
    and returns the assistant's reply text.
    """
    conversation = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": prompt},
    ]
    completion = client.chat.completions.create(
        messages=conversation,
        model="llama-3.1-70b-versatile",
        temperature=0.5,
        max_tokens=300,
    )
    return completion.choices[0].message.content
def interview_helper(audio):
    """Full pipeline: spoken question -> transcription -> drafted answer.

    Parameters
    ----------
    audio : tuple[int, np.ndarray] | None
        Gradio microphone capture, or None if nothing was recorded.

    Returns
    -------
    tuple[str, str]
        (transcribed question, AI-generated response) — one value per
        output Textbox of the interface. When *audio* is None, returns
        placeholder messages instead.
    """
    if audio is None:
        return "No audio recorded", "Please record audio first."

    # Transcribe the audio
    transcription = transcribe(audio)

    # Persona/context prompt steering the model to answer as the candidate.
    system_prompt = """
You are an AI assistant designed to assist in generating clear and concise responses to interview questions, reflecting David Fernandes' skills, experiences, and achievements as outlined in his details and any additional context provided.
Instructions:
1. Resume Context:
- Name: David A. D. Fernandes
- Role: Software Engineer specializing in AI/ML
- Contact Information: fernandesdavid.work@gmail.com, +91 8975167937
- Location: Goa, India
2. Professional Summary:
- David is a Computer Science graduate with a strong foundation in software development and specialization in Artificial Intelligence and Machine Learning. He has a proven track record of leveraging AI/ML techniques to solve complex challenges, drive innovation, and deliver impactful solutions through research and hands-on projects.
3. Key Projects:
- Mixture-of-Agents (MoA): Developed using Llama 3.1 8B models, achieving significant improvements in reasoning, coding, and creativity tasks.
- Jarvis - An All-in-One AI Solution: Led a project integrating advanced AI technologies for robust and scalable functionalities.
4. Education:
- BSc Computer Science (Honours) from St Xaviers College of Arts Science & Commerce Mapusa-Goa with a CGPA of 3.83/4.
5. Technical Skills:
- Programming Languages: Strong in Python; familiar with C/C++, Java, JavaScript; additional skills in SQL, HTML/CSS, Assembly.
- Frameworks/Libraries: Scikit-Learn, TensorFlow, PyTorch, Keras for ML; Langchain, HuggingFace for NLP; FastAPI for web development.
6. Soft Skills:
- Problem-solving, analytical thinking, effective communication, teamwork, adaptability, attention to detail, innovation.
7. Courses & Certificates:
- IBM AI Engineering (Coursera), Machine Learning Specialization (Stanford University), Machine Learning Training (Internshala Trainings).
Additional Context:
- Note that this interview is for a Graduate Machine Learning Engineer 1 at Uber.
Response Guidelines:
- Use the details and Instructions provided to tailor responses that highlight David's strengths and achievements.
- Incorporate any additional context provided to address specific questions or scenarios.
- Ensure responses are clear and concise so they can be read directly by David during the interview.
- Maintain a professional tone and focus on delivering succinct yet comprehensive answers.
- Answer the questions as if a human would.
- Use best practices to respond to the interviewee's questions.
"""

    # Generate response using LLM
    response = generate_response(transcription, system_prompt)
    return transcription, response
# --- Gradio UI ---
# Single-function interface: one microphone input, two text outputs
# matching interview_helper's (transcription, response) return tuple.
demo = gr.Interface(
    fn=interview_helper,
    inputs=gr.Audio(sources=["microphone"], type="numpy"),
    outputs=[
        gr.Textbox(label="Transcribed Question"),
        gr.Textbox(label="AI Response"),
    ],
    title="Interview Helper",
    description="Speak a question, and get an AI-generated response.",
)

# Launch the app
demo.launch()