# NOTE: The following lines are residue from a scraped Hugging Face Spaces page,
# preserved here as comments so the file remains valid Python.
# Spaces: Sleeping / Sleeping
# File size: 6,057 Bytes
# Commit: 98c6f77
import os
import streamlit as st
from together import Together
from PIL import Image
import moviepy.editor as mp
from gtts import gTTS
import tempfile
import base64
from io import BytesIO
import numpy as np
# Set up the Together.ai client. The API key is read from the environment so
# no secret is hard-coded; if TOGETHER_API_KEY is unset this is None and the
# first API call will fail with an authentication error.
together_api_key = os.environ.get("TOGETHER_API_KEY")
client = Together(api_key=together_api_key)
def generate_interview_transcript(role, experience, additional_details, interview_type):
    """Ask the Together chat API for a short mock-interview transcript.

    Args:
        role: Job title the candidate is interviewing for.
        experience: Experience level (e.g. "Entry-level", "Senior").
        additional_details: Free-form candidate background to weave in.
        interview_type: Interview scenario (e.g. "Behavioral", "Technical").

    Returns:
        The transcript text produced by the model, formatted as alternating
        "Interviewer (Alex): ..." / "Candidate (Sam): ..." lines.
    """
    instructions = f"""As a professional interview transcript writer, create a {interview_type} mock interview script for a {experience} {role} candidate.
Include interviewer and candidate responses using gender-neutral names.
The interview should be realistic, professional, and concise.
Incorporate the following additional details about the candidate: {additional_details}
Format the transcript as follows:
Interviewer (Alex): [Question]
Candidate (Sam): [Response]
Create a short but relevant interview with 3-5 question-answer pairs."""
    chat_messages = [
        {"role": "system", "content": "You are a professional interview transcript writer."},
        {"role": "user", "content": instructions},
    ]
    completion = client.chat.completions.create(
        model="meta-llama/Llama-3-8b-chat-hf",
        messages=chat_messages,
    )
    return completion.choices[0].message.content
def generate_audio(script, lang='en'):
    """Narrate a transcript to a temporary MP3 file with gTTS.

    Only the spoken part of each "Speaker: text" line is narrated; the
    speaker labels themselves are dropped.

    Args:
        script: Transcript text with one "Speaker: text" entry per line.
        lang: gTTS language code (default 'en').

    Returns:
        Path to the generated .mp3 file. The caller owns the file and is
        responsible for deleting it.

    Raises:
        ValueError: If the script contains no "Speaker: text" lines —
            gTTS fails with an opaque error on empty input, so we fail
            early with a clear message instead.
    """
    lines = script.split('\n')
    spoken_parts = [line.split(':', 1)[1].strip() for line in lines if ':' in line]
    full_text = " ".join(spoken_parts)
    if not full_text.strip():
        raise ValueError("Transcript contains no 'Speaker: text' lines to narrate.")
    # Reserve a temp filename, then let gTTS write to it after the handle
    # is closed (writing while the handle is open fails on Windows).
    with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as temp:
        audio_file = temp.name
    tts = gTTS(text=full_text, lang=lang, slow=False)
    tts.save(audio_file)
    return audio_file
def generate_image(prompt):
    """Render one 512x512 image for *prompt* via Stable Diffusion 2.1.

    Args:
        prompt: Text description of the desired image.

    Returns:
        A PIL.Image decoded from the base64 payload in the API response.
    """
    api_response = client.images.generate(
        prompt=prompt,
        model="stabilityai/stable-diffusion-2-1",
        steps=30,
        width=512,
        height=512,
        n=1,
    )
    raw_bytes = base64.b64decode(api_response.data[0].b64_json)
    buffer = BytesIO(raw_bytes)
    return Image.open(buffer)
def create_video_with_images_and_audio(script, audio_file, interviewer_img, interviewee_img, output_video):
    """Compose a slideshow video: speaker images timed to the narration audio.

    Each "Speaker: text" line shows the interviewer or interviewee image for
    a duration proportional to its word count. The raw word-count estimate
    (0.5 s/word) drifts from the real narration length, so all durations are
    rescaled to sum exactly to the audio duration, keeping image switches in
    sync with the audio track.

    Args:
        script: Transcript with "Speaker: text" lines.
        audio_file: Path to the narration audio (mp3).
        interviewer_img: PIL image shown for "Interviewer" lines.
        interviewee_img: PIL image shown for all other lines.
        output_video: Path where the H.264 mp4 is written.
    """
    audio_clip = mp.AudioFileClip(audio_file)
    total_audio_duration = audio_clip.duration
    lines = [line for line in script.split('\n') if ':' in line]
    # First pass: per-line duration estimates from word count (min 1 word
    # so a blank response still gets a visible frame).
    estimates = []
    for line in lines:
        speaker, text = line.split(':', 1)
        estimates.append((speaker, max(len(text.strip().split()), 1) * 0.5))
    total_estimate = sum(d for _, d in estimates) or 1.0
    scale = total_audio_duration / total_estimate
    # Second pass: build the image clips with rescaled timings.
    clips = []
    start_time = 0.0
    for speaker, estimate in estimates:
        duration = estimate * scale
        img = np.array(interviewer_img if 'Interviewer' in speaker else interviewee_img)
        clips.append(mp.ImageClip(img).set_start(start_time).set_duration(duration))
        start_time += duration
    video = mp.CompositeVideoClip(clips, size=(512, 512))
    final_clip = video.set_audio(audio_clip)
    try:
        final_clip.write_videofile(output_video, codec='libx264', fps=24)
    finally:
        # Release ffmpeg readers/writers even if encoding fails.
        final_clip.close()
        audio_clip.close()
def create_mock_interview(role, experience, additional_details, interview_type):
    """Run the full pipeline: transcript -> audio -> images -> video.

    Progress is reported through Streamlit spinners/success messages as a
    side effect, and the transcript is shown in a text area.

    Args:
        role: Job title for the candidate.
        experience: Experience level label.
        additional_details: Free-form candidate background.
        interview_type: Interview scenario label.

    Returns:
        Path to the generated temporary .mp4 file; the caller is
        responsible for deleting it.
    """
    with st.spinner("Generating interview transcript..."):
        transcript = generate_interview_transcript(role, experience, additional_details, interview_type)
    st.success("Interview transcript generated!")
    st.text_area("Generated Transcript", transcript, height=200)
    with st.spinner("Generating audio..."):
        audio_file = generate_audio(transcript)
    st.success("Audio generated!")
    with st.spinner("Generating images for interviewer and interviewee..."):
        interviewer_img = generate_image("A professional interviewer sitting in a modern office setting, facing the camera, upper body shot")
        # BUG FIX: original prompt read f"A confident{experience}" (no space),
        # producing e.g. "A confidentEntry-level".
        interviewee_img = generate_image(f"A confident {experience} job candidate sitting for an interview, facing the camera, upper body shot")
    st.success("Images generated!")
    with st.spinner("Creating final video..."):
        with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as temp:
            output_video = temp.name
        try:
            create_video_with_images_and_audio(transcript, audio_file, interviewer_img, interviewee_img, output_video)
        finally:
            # The intermediate narration mp3 is no longer needed; the
            # original code leaked one temp file per run.
            if os.path.exists(audio_file):
                os.unlink(audio_file)
    st.success("Video created successfully!")
    return output_video
# Streamlit UI
# Main page: collects the interview parameters, then generates and serves the video.
# NOTE(review): the emoji literals below (e.g. "π₯", "π¬") appear to be
# mojibake from a mis-decoded source file — confirm the intended characters
# before re-encoding; they are preserved byte-for-byte here.
# st.set_page_config must be the first Streamlit call in the script.
st.set_page_config(page_title="AIInterviewCoach", page_icon="π₯", layout="wide")
st.title("π¬ AIInterviewCoach")
st.subheader("Generate Custom Mock Interview Videos with AI")
# Two-column input form: role/experience on the left, scenario/details on the right.
col1, col2 = st.columns(2)
with col1:
    role = st.text_input("π¨ Job Role", "Data Analyst")
    experience = st.selectbox("π Experience Level", ["Entry-level", "Mid-level", "Senior", "Executive"])
with col2:
    interview_type = st.selectbox("π Interview Scenario", ["Standard", "Behavioral", "Technical", "Case Study"])
    additional_details = st.text_area("β¨ Additional Details", "Proficient in SQL and Python")
# The whole generation pipeline runs synchronously inside this button handler;
# Streamlit re-executes the script top-to-bottom on every interaction.
if st.button("π₯ Generate Mock Interview"):
    st.write("Generating interview based on the following parameters:")
    st.write(f"- Role: {role}")
    st.write(f"- Experience Level: {experience}")
    st.write(f"- Interview Type: {interview_type}")
    st.write(f"- Additional Details: {additional_details}")
    output_video = create_mock_interview(role, experience, additional_details, interview_type)
    st.success("π Your mock interview is ready!")
    st.video(output_video)
    # Offer the finished mp4 as a download, then remove the temp file.
    # NOTE(review): deleting immediately after rendering the download button
    # may break the download on a later rerun — confirm desired lifetime.
    with open(output_video, 'rb') as f:
        st.download_button(
            label="π₯ Download Video",
            data=f,
            file_name="my_mock_interview.mp4",
            mime="video/mp4"
        )
    os.unlink(output_video)  # Delete the temporary file
# Sidebar: static "about" and usage-tips panels (no interactive state).
st.sidebar.title("π About AIInterviewCoach")
st.sidebar.info("""
AIInterviewCoach creates realistic mock interview videos.
π§ Features:
- Custom scenarios
- Role-specific questions
- AI-generated visuals
- Professional voice-overs
π Created by: Dhruv Tibarewal
""")
st.sidebar.title("π‘ Tips")
st.sidebar.success("""
- Be specific about your role
- Share unique skills
- Try different scenarios
- Practice, iterate, succeed! π
""")