# NOTE(review): the three lines above this point in the scrape ("Spaces:" /
# "Sleeping" / "Sleeping") are Hugging Face Spaces page chrome, not source code.
"""English Interview Coach — Gradio app powered by LangGraph.

Supports multiple LLM providers: OpenAI, Anthropic (Claude), Google Gemini.
API keys are read from HF Space Secrets (environment variables).
Tries providers in order until one works.
"""
| import os | |
| import random | |
| import traceback | |
| import uuid | |
| from typing import Annotated, Optional, TypedDict | |
| import gradio as gr | |
| from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage | |
| from langgraph.checkpoint.memory import MemorySaver | |
| from langgraph.graph import END, START, StateGraph | |
| from langgraph.graph.message import add_messages | |
# ---------------------------------------------------------------------------
# LLM providers — each returns a LangChain chat model
# ---------------------------------------------------------------------------
# Fallback order tried by get_llm() after the LLM_PROVIDER-preferred one.
PROVIDERS = ["openai", "gemini", "anthropic"]
def _build_openai():
    """Build a ChatOpenAI model; raises ValueError if OPENAI_API_KEY is unset."""
    from langchain_openai import ChatOpenAI

    api_key = os.getenv("OPENAI_API_KEY", "")
    if not api_key:
        raise ValueError("OPENAI_API_KEY not set")
    return ChatOpenAI(model="gpt-4o-mini", openai_api_key=api_key, temperature=0.7)
def _build_gemini():
    """Build a Gemini chat model; raises ValueError if GEMINI_API_KEY is unset."""
    from langchain_google_genai import ChatGoogleGenerativeAI

    api_key = os.getenv("GEMINI_API_KEY", "")
    if not api_key:
        raise ValueError("GEMINI_API_KEY not set")
    model = ChatGoogleGenerativeAI(
        model="gemini-2.5-flash",
        google_api_key=api_key,
        temperature=0.7,
    )
    return model
def _build_anthropic():
    """Build a Claude chat model; raises ValueError if ANTHROPIC_API_KEY is unset."""
    from langchain_anthropic import ChatAnthropic

    api_key = os.getenv("ANTHROPIC_API_KEY", "")
    if not api_key:
        raise ValueError("ANTHROPIC_API_KEY not set")
    model = ChatAnthropic(
        model="claude-sonnet-4-20250514",
        anthropic_api_key=api_key,
        temperature=0.7,
        max_tokens=2048,
    )
    return model
# Maps provider name -> zero-argument factory returning a configured chat model.
_BUILDER = {
    "openai": _build_openai,
    "gemini": _build_gemini,
    "anthropic": _build_anthropic,
}
def get_llm():
    """Return the first LLM provider that initializes successfully.

    The provider named by the LLM_PROVIDER env var (default ``"openai"``) is
    tried first, then the remaining entries of PROVIDERS in their listed order.

    Returns:
        A configured LangChain chat model instance.

    Raises:
        RuntimeError: if every provider fails (missing key, import error, ...),
            with the per-provider error messages joined into the message.
    """
    # Normalize the env value; a mistyped/unknown provider name would otherwise
    # be tried first and waste an attempt on a guaranteed KeyError.
    preferred = os.getenv("LLM_PROVIDER", "openai").strip().lower()
    order = [preferred] if preferred in PROVIDERS else []
    order += [p for p in PROVIDERS if p != preferred]
    errors = []
    for name in order:
        try:
            llm = _BUILDER[name]()
            print(f"[coach] Using provider: {name}")
            return llm
        except Exception as e:
            # Record the failure and fall through to the next provider.
            errors.append(f"{name}: {e}")
    raise RuntimeError("All LLM providers failed:\n" + "\n".join(errors))
# ---------------------------------------------------------------------------
# State & Prompts
# ---------------------------------------------------------------------------
class CoachState(TypedDict):
    """Conversation state carried through the LangGraph checkpointer."""

    # Chat transcript; add_messages appends updates instead of replacing.
    messages: Annotated[list[BaseMessage], add_messages]
    # Coaching mode; the Gradio handler only ever sets "interview".
    mode: str
    # Question category — set to None by the handler; presumably reserved
    # for letting users pin behavioral/technical/situational. TODO confirm.
    interview_type: Optional[str]
    # The question most recently asked by interview_node.
    current_question: Optional[str]
    # Number of feedback turns produced so far (incremented each node run).
    corrections_count: int
    # Unused by the visible code; seeded as None — likely a future feature.
    session_topic: Optional[str]
# System prompt sent on every turn: defines the coach persona, the feedback
# loop, and the Portuguese-interference patterns to flag. The trailing
# backslashes join long lines inside the triple-quoted string.
INTERVIEW_PROMPT = """\
You are an expert English interview coach specializing in helping Brazilian software developers \
prepare for international job interviews.
Your role:
1. Ask interview questions one at a time (behavioral, technical, or situational).
2. After the user answers, provide detailed feedback on:
- Content quality: Was the answer structured? Did it use the STAR method for behavioral questions?
- Grammar corrections: Fix any errors and explain WHY in simple terms.
- Vocabulary improvements: Suggest more professional/natural alternatives.
- Common Brazilian-English mistakes: Flag literal translations from Portuguese \
(e.g., "I have 5 years" instead of "I have been working for 5 years").
3. Give a brief improved version of their answer as a model response.
4. Then ask the next question.
Guidelines:
- Speak in English. Only use Portuguese for brief grammar explanations when needed.
- Be encouraging but honest — the goal is real improvement.
- Focus on tech interview context: FAANG-style behavioral, system design discussions, \
team collaboration scenarios.
- When the user says "done", "exit", or "quit", summarize their performance with \
key strengths and areas to improve.
Common Brazilian-English pitfalls to watch for:
- False cognates (pretend ≠ pretender, actually ≠ atualmente)
- Missing articles (a/an/the)
- Present perfect misuse ("I work here since 2020" → "I have worked here since 2020")
- Preposition errors ("depend of" → "depend on")
- Word order in questions
- Overuse of "make" vs "do"
"""
# Question bank keyed by category; interview_node picks a random category,
# then a random question from it (repeats across turns are possible).
QUESTIONS = {
    "behavioral": [
        "Tell me about yourself and your experience as a software developer.",
        "Describe a time when you had to deal with a difficult team member.",
        "Tell me about a project you're most proud of. What was your role?",
        "Describe a situation where you had to meet a tight deadline.",
        "Give me an example of a time you made a mistake at work. What did you learn?",
        "Tell me about a time you had to learn a new technology quickly.",
        "Describe a conflict you had with a colleague and how you resolved it.",
        "Tell me about a time you went above and beyond what was expected.",
        "How do you handle feedback, especially negative feedback?",
        "Describe a situation where you had to persuade someone to see things your way.",
    ],
    "technical": [
        "How would you explain REST vs GraphQL to a non-technical person?",
        "What's your approach to debugging a production issue you can't reproduce locally?",
        "How do you decide between building from scratch versus using a library?",
        "Describe your experience with CI/CD pipelines.",
        "What strategies do you use to write maintainable and scalable code?",
        "How do you approach code reviews? What do you look for?",
        "Explain how you would design a simple URL shortener service.",
        "What's your experience with testing? How do you decide what to test?",
        "How do you handle technical debt in a project?",
        "Describe your experience with cloud services (AWS, GCP, Azure).",
    ],
    "situational": [
        "Your team disagrees on the technical approach. What do you do?",
        "Requirements are unclear two days before deadline. How do you handle it?",
        "A senior dev gives harsh code review criticism. How do you respond?",
        "You're assigned a project with an unfamiliar tech stack. What's your plan?",
        "Your manager asks you to estimate a task you know nothing about. What do you do?",
        "You discover a critical bug on Friday evening. What steps do you take?",
        "A teammate consistently misses deadlines, affecting your work. How do you handle it?",
        "You're asked to build a feature you think is a bad idea. What do you do?",
        "The client changes requirements mid-sprint. How do you handle it?",
        "You need to present a technical decision to non-technical stakeholders. How?",
    ],
}
# ---------------------------------------------------------------------------
# LangGraph node
# ---------------------------------------------------------------------------
def interview_node(state: CoachState) -> dict:
    """Give feedback on the user's last answer and ask the next question.

    On the first turn (at most one message in state) the LLM greets the user
    and opens with a question; on later turns it critiques the latest answer
    (content, grammar, vocabulary, common Brazilian-English mistakes) and then
    poses a new randomly chosen question.

    Returns:
        Partial state update: the assistant reply, the question just asked,
        and an incremented corrections counter.
    """
    llm = get_llm()  # rebuilt each turn so provider/key changes take effect
    messages = state["messages"]

    # Pick the next question uniformly across all three categories.
    q_type = random.choice(["behavioral", "technical", "situational"])
    next_question = random.choice(QUESTIONS[q_type])

    if len(messages) <= 1:
        # First turn: greet and start the mock interview.
        response = llm.invoke([
            SystemMessage(content=INTERVIEW_PROMPT),
            SystemMessage(
                content=f"Start the mock interview. Greet the user briefly and ask: {next_question}"
            ),
            *(messages if messages else []),
        ])
    else:
        current_q = state.get("current_question") or "the previous question"
        # BUG FIX: the instruction previously read "The user answered:
        # '<question>'", presenting the question text as if it were the
        # user's answer — now it correctly names the question answered.
        response = llm.invoke([
            SystemMessage(content=INTERVIEW_PROMPT),
            *messages,
            SystemMessage(
                content=(
                    f"The user was answering the question: '{current_q}'. "
                    "Give detailed feedback (content, grammar, vocabulary, BR mistakes). "
                    f"Provide an improved model answer. Then ask: '{next_question}'"
                )
            ),
        ])

    return {
        "messages": [response],
        "current_question": next_question,
        "corrections_count": state.get("corrections_count", 0) + 1,
    }
# ---------------------------------------------------------------------------
# Build graph
# ---------------------------------------------------------------------------
# Single-node graph (START -> interview -> END) with in-memory checkpointing
# so conversation state survives between turns within one process.
checkpointer = MemorySaver()
_workflow = StateGraph(CoachState)
_workflow.add_node("interview", interview_node)
_workflow.add_edge(START, "interview")
_workflow.add_edge("interview", END)
graph = _workflow.compile(checkpointer=checkpointer)
# ---------------------------------------------------------------------------
# Gradio interface
# ---------------------------------------------------------------------------
def chat(message: str, history: list[dict], request: Optional[gr.Request] = None) -> str:
    """Gradio chat handler — routes messages through LangGraph.

    Args:
        message: The user's latest chat message.
        history: Prior turns as Gradio message dicts (empty on the first turn).
        request: Injected by Gradio when the app runs in a server context;
            used to isolate conversation state per browser session.

    Returns:
        The assistant's reply text, or a formatted error message on failure.
    """
    # BUG FIX: a single hard-coded thread id made every visitor to the Space
    # share one conversation inside MemorySaver. Use Gradio's per-session
    # hash when available; fall back to the old constant otherwise.
    session = getattr(request, "session_hash", None) if request else None
    thread_id = session or "gradio-session"
    config = {"configurable": {"thread_id": thread_id}}
    try:
        if not history:
            # First turn: seed the full CoachState.
            result = graph.invoke(
                {
                    "messages": [HumanMessage(content=message)],
                    "mode": "interview",
                    "interview_type": None,
                    "current_question": None,
                    "corrections_count": 0,
                    "session_topic": None,
                },
                config,
            )
        else:
            # Later turns: the checkpointer restores the rest of the state.
            result = graph.invoke(
                {"messages": [HumanMessage(content=message)]},
                config,
            )
        last_msg = result["messages"][-1]
        return last_msg.content
    except Exception as e:
        # Show a short error in the chat; full traceback goes to Space logs.
        traceback.print_exc()
        return f"**Error:** {e}\n\nPlease try again or check the Space logs."
# Keyword arguments for gr.ChatInterface, built separately so the Gradio-5
# compatibility shim below can conditionally add the "type" key.
_chat_kwargs = dict(
    fn=chat,
    title="English Interview Coach",
    description=(
        "Practice English for international tech interviews. "
        "Answer questions and get feedback on grammar, vocabulary, and content. "
        "Built with LangGraph + Gradio. Provider: "
        + os.getenv("LLM_PROVIDER", "openai")
    ),
    examples=[
        "Let's start a mock interview!",
        "I want to practice behavioral questions.",
        "Ask me a technical question about system design.",
    ],
    theme=gr.themes.Soft(),
)
# Gradio 5 needs type="messages", Gradio 6 removed it
import importlib.metadata as _meta

def _gradio_major_minor() -> tuple[int, int]:
    """Best-effort (major, minor) of the installed Gradio version.

    ROBUSTNESS FIX: ``int(x)`` on raw version parts crashed on pre-release
    strings like "6.0b1", and ``_meta.version`` raises if the distribution
    metadata is missing. Any failure now falls back to (0, 0), which keeps
    the historical Gradio-5 behavior (passing type="messages").
    """
    def _digits(part: str) -> int:
        # Keep only the leading numeric content of a part like "0b1".
        return int("".join(c for c in part if c.isdigit()) or 0)

    try:
        major, minor = _meta.version("gradio").split(".")[:2]
        return (_digits(major), _digits(minor))
    except Exception:
        return (0, 0)

if _gradio_major_minor() < (6, 0):
    _chat_kwargs["type"] = "messages"
demo = gr.ChatInterface(**_chat_kwargs)

if __name__ == "__main__":
    demo.launch()