"""Gradio app: an interactive CV chatbot for Alexander Todorov.

Reads the CV text from disk, builds an LLM system prompt from it, and serves
a Gradio Blocks UI with a header, an avatar + summary row, an OpenAI-backed
ChatInterface, and a small project gallery.

Requires OPENAI_API_KEY in the environment (or a Hugging Face Space secret).
"""

import os

import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI

# Load a local .env file if present (for local development); this is a no-op
# on Hugging Face Spaces, where the key is configured as a Space secret.
load_dotenv()

# Fail fast with a clear message instead of erroring on the first API call.
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
    raise ValueError(
        "OPENAI_API_KEY not found. Please set it in your environment"
        " variables or Hugging Face Space secrets."
    )

client = OpenAI(api_key=api_key)

# Asset paths — support both the local repo layout (src/...) and a flat
# Hugging Face Spaces layout (files in the repository root).
cv_path = "src/cv/me.txt" if os.path.exists("src/cv/me.txt") else "cv/me.txt"
avatar_path = (
    "src/cv/avatar.jpeg" if os.path.exists("src/cv/avatar.jpeg") else "cv/avatar.jpeg"
)
projects_base = (
    "src/projects_images"
    if os.path.exists("src/projects_images")
    else "projects_images"
)

# Gallery entries rendered at the bottom of the page.
projects = [
    {"image": f"{projects_base}/s_up.jpeg", "title": "Ai Recommendation System"},
    {"image": f"{projects_base}/llm.jpeg", "title": "LLM Automation"},
    {"image": f"{projects_base}/bi.png", "title": "BI"},
    {"image": f"{projects_base}/robot.png", "title": "Robot Arm Control With Ros Python and AI "},
]

# Read the CV once at startup. Explicit encoding so the result does not
# depend on the host locale (fix: the original relied on the default).
with open(cv_path, "r", encoding="utf-8") as f:
    cv_text = f.read()

system_prompt = f"""
Your name is Alexander. You are acting as Alexander Todorov. You will answer questions related to your career, skills, work experience, and education. \
Questions will be asked by visitors, headhunters, or recruiters about potential job opportunities. \
Respond professionally and use professional language. \
Answer only questions that are directly related to your CV. If you do not find the answer in your CV, respond with: \
"I can only answer questions about my CV."

CV:
{cv_text}

With this context, please chat with the user, always staying in character as Alexander Todorov.
"""


def chat(message, history):
    """Return the assistant's reply to *message* given the chat *history*.

    Args:
        message: The user's latest message (str).
        history: Prior turns as a list of {"role": ..., "content": ...}
            dicts, as supplied by gr.ChatInterface in "messages" format.

    Returns:
        The model's reply text (str).
    """
    messages = (
        [{"role": "system", "content": system_prompt}]
        + history
        + [{"role": "user", "content": message}]
    )
    response = client.chat.completions.create(model="gpt-4o-mini", messages=messages)
    return response.choices[0].message.content


# NOTE(review): the HTML/markdown markup originally embedded in the
# gr.Markdown strings below was lost in extraction; the text content is
# preserved and the markup reconstructed as plain Markdown — confirm the
# desired styling (elem_ids suggest custom CSS was used).
with gr.Blocks() as ui:
    # --- Header: name and LinkedIn link --------------------------------
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("Alexander Todorov")
        with gr.Column(scale=4):
            # NOTE(review): original link target unknown — restore the URL.
            gr.Markdown("LinkedIn")

    # --- Avatar and professional summary -------------------------------
    with gr.Row():
        with gr.Column(scale=1):  # 1 part
            gr.Image(
                avatar_path,
                type="pil",
                show_label=False,
                height=150,
                interactive=False,
                container=False,
                # NOTE(review): `buttons` is not a documented gr.Image
                # parameter in mainstream Gradio releases — confirm the
                # installed version supports it (otherwise use
                # show_download_button / show_share_button /
                # show_fullscreen_button).
                buttons=[["download", "share", "fullscreen"]],
            )
        with gr.Column(scale=3):  # 3 parts
            gr.Markdown(
                "Software and Data Engineer with over five years of experience"
                " delivering intelligent, user-focused AI solutions and driving"
                " automation and innovation in complex environments."
            )

    # --- Chatbot section ------------------------------------------------
    gr.Markdown("Chat with Me About My CV", elem_id="job-title-light")
    gr.Markdown("---", elem_id="custom_divider")
    chatbot = gr.Chatbot(
        placeholder="Interactive CV Guide\nAsk Me Anything", height=300
    )
    chat_interface = gr.ChatInterface(fn=chat, chatbot=chatbot)

    # --- Projects gallery ------------------------------------------------
    gr.Markdown("Examples of My Work", elem_id="job-title-light")
    gr.Markdown("---", elem_id="custom_divider")
    with gr.Row():
        for project in projects:
            with gr.Column():  # equal width for each project
                # Project image
                gr.Image(
                    project["image"],
                    type="pil",
                    show_label=False,
                    interactive=False,
                    height=200,
                    width=350,
                    buttons=[["download", "share", "fullscreen"]],
                )
                # Project title / text
                gr.Markdown(project["title"])


if __name__ == "__main__":
    ui.launch()