# Hugging Face Space: extreme high-difficulty interview Q&A generator
import os
import random
import re

import gradio as gr
from langchain.prompts import PromptTemplate
from langchain_openai import AzureChatOpenAI
# Azure OpenAI credentials/configuration, read from environment variables
# (stored as Hugging Face Secrets when deployed as a Space).
# Any of these may be None if the secret is not configured.
AZURE_OPENAI_ENDPOINT = os.environ.get("AZURE_OPENAI_ENDPOINT")
AZURE_OPENAI_API_KEY = os.environ.get("AZURE_OPENAI_API_KEY")
DEPLOYMENT_NAME = os.environ.get("DEPLOYMENT_NAME")
API_VERSION = os.environ.get("API_VERSION")
def get_random_temperature():
    """Return one of three low sampling temperatures, chosen at random.

    Keeps the model output mostly deterministic while adding slight
    run-to-run variation.
    """
    return random.choice((0.1, 0.2, 0.3))
# Prompt template for generating 30 expert-level Q&A pairs from a job
# description. The single input variable {jd} is substituted at invoke time
# by the chain built in generate_qna().
prompt = PromptTemplate(
    input_variables=["jd"],
    template="""
You are a veteran software architect/interviewer with 40+ years of deep expertise.
Your task is to generate **30 unique, extremely high-difficulty technical QUESTION-ANSWER pairs** for senior-level technical screening.
Instructions:
• Carefully analyze the job description below to identify **Must have skills** (or equivalent). If not found, infer key skills from full text.
• Each question must be **extremely difficult**, intended to challenge expert-level candidates. Cover:
- Low-level internal mechanisms
- Edge cases rarely encountered
- Complex design trade-offs
- Deep theoretical principles
- Performance optimization details
- Architectural decisions under constraints
- Framework internals and lesser-known behaviors
• Avoid any easy, medium, common interview, or non-technical questions.
• DO NOT include questions on soft skills, project management, or team collaboration.
• Each question must target **one very specific sub-topic**.
• Answers should be **strict, concise** (one word or a short phrase, max 5 words).
• Output must be strictly 60 lines:
Q1: <question>
A1: <answer>
...
Q30: <question>
A30: <answer>
Job Description:
{jd}
"""
)
def generate_qna(jd_text):
    """Send the Q&A prompt to Azure OpenAI and return the raw text reply.

    A fresh client is constructed on every call so that each request gets
    its own randomly chosen (low) sampling temperature.
    """
    client = AzureChatOpenAI(
        api_key=AZURE_OPENAI_API_KEY,
        azure_endpoint=AZURE_OPENAI_ENDPOINT,
        api_version=API_VERSION,
        temperature=get_random_temperature(),
        model="gpt-4o-mini",
        deployment_name=DEPLOYMENT_NAME,
    )
    # LangChain LCEL: pipe the prompt template into the model and invoke.
    reply = (prompt | client).invoke({"jd": jd_text})
    return reply.content
def parse_qna_to_checkboxes(generated_text):
    """Normalize model output into a list of one-line "Qn: ... An: ..." strings.

    Handles the two layouts the model may emit:
      * question and answer already on one line ("Q1: ...? A1: ...")
      * a question line followed by a separate answer line

    Bug fixed: the previous check (``"A" in line``) treated ANY question
    containing a capital "A" (e.g. "Q2: What does ACID mean?") as an inline
    Q+A line, which silently dropped the real answer on the next line. We now
    require an explicit "A<number>:" marker to consider a line combined, and
    only merge the following line when it actually starts with "A".

    Args:
        generated_text: Raw text returned by the model.
    Returns:
        List of strings, one entry per Q&A pair (question alone if no
        answer was found).
    """
    lines = generated_text.strip().split('\n')
    qna_pairs = []
    i = 0
    while i < len(lines):
        line = lines[i].strip()
        if not line.startswith("Q"):
            # Blank lines or stray prose between pairs: skip.
            i += 1
            continue
        if re.search(r"\bA\d+\s*:", line):
            # Question and answer already combined on a single line.
            qna_pairs.append(line)
            i += 1
        elif i + 1 < len(lines) and lines[i + 1].strip().startswith("A"):
            # Separate answer line follows: merge into one entry.
            qna_pairs.append(f"{line} {lines[i + 1].strip()}")
            i += 2
        else:
            # Question with no recognizable answer: keep it alone.
            qna_pairs.append(line)
            i += 1
    return qna_pairs
# --- Gradio interface -------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("## 🚀 Extreme High-Difficulty Interview Q&A Generator")

    jd_input = gr.Textbox(lines=20, label="Paste Job Description Here")
    generate_btn = gr.Button("Generate 30 Extreme Q&A")
    output_checkboxes = gr.CheckboxGroup(
        label="Select Maximum 10 Q&A", choices=[], interactive=True
    )
    finalize_btn = gr.Button("Finalize Selection")
    final_output = gr.Textbox(label="Final Selected Q&A", lines=20)

    def on_generate(jd_text):
        # Rebuild the checkbox choices and clear any previous selection.
        choices = parse_qna_to_checkboxes(generate_qna(jd_text))
        return gr.update(choices=choices, value=[])

    def on_finalize(selected):
        # Enforce the 10-item cap before emitting the final text.
        if len(selected) > 10:
            return "⚠️ Please select maximum 10 Q&A only."
        return "\n".join(selected)

    generate_btn.click(fn=on_generate, inputs=jd_input, outputs=output_checkboxes)
    finalize_btn.click(fn=on_finalize, inputs=output_checkboxes, outputs=final_output)

if __name__ == "__main__":
    demo.launch()