File size: 3,455 Bytes
22efd93
01515a2
22efd93
a661e6d
 
22efd93
a661e6d
01515a2
a661e6d
 
 
 
01515a2
22efd93
a661e6d
01515a2
22efd93
01515a2
 
a661e6d
 
 
01515a2
 
 
 
22efd93
01515a2
a661e6d
01515a2
a661e6d
22efd93
01515a2
a661e6d
 
 
2025b65
01515a2
 
3ec05b8
22efd93
01515a2
a661e6d
2025b65
01515a2
22efd93
2025b65
a661e6d
01515a2
a661e6d
 
 
 
 
 
 
 
01515a2
 
a661e6d
 
 
 
 
 
 
 
01515a2
a661e6d
 
 
01515a2
a661e6d
 
22efd93
01515a2
a661e6d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22efd93
 
a661e6d
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
import ast

import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Hugging Face model repository name
MODEL_NAME = "fine_tuned_gpt2"

# Load the model and tokenizer from Hugging Face
def load_model():
    """Fetch the fine-tuned GPT-2 language model and its tokenizer.

    Downloads (or reads from the local cache) the weights published under
    MODEL_NAME and returns them ready for inference.

    Returns:
        A ``(model, tokenizer)`` tuple.
    """
    print("Loading model from Hugging Face...")
    tok = GPT2Tokenizer.from_pretrained(MODEL_NAME)
    lm = GPT2LMHeadModel.from_pretrained(MODEL_NAME)
    print("Model loaded successfully.")
    return lm, tok

# Initialize model and tokenizer
# NOTE: this runs at import time, so merely importing this module triggers the
# model download/load — keep that in mind for tests and deployment.
model, tokenizer = load_model()

# Functions for question generation and feedback
def generate_question(response, history):
    """
    Generate the next interview question from the user's response and the
    prior Q&A history.

    Args:
        response: The user's latest free-text answer.
        history: A list of dicts, each expected to have "question" and
            "answer" keys (KeyError propagates to the caller otherwise).

    Returns:
        The decoded model output (prompt plus continuation) as a string.
    """
    # Build the prompt: all prior turns, then the new response.
    input_text = ""
    for qa in history:
        input_text += f"Question: {qa['question']}\nAnswer: {qa['answer']}\n"
    input_text += f"Response: {response}\nNext Question:"

    inputs = tokenizer.encode(input_text, return_tensors="pt", truncation=True, max_length=512)
    # Use max_new_tokens rather than max_length: max_length counts the prompt
    # tokens too, and the prompt here can be up to 512 tokens — longer than
    # the old max_length=200 budget, which made generate() fail or emit
    # nothing new for long histories.
    outputs = model.generate(
        inputs,
        max_new_tokens=200,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,
    )
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return generated_text

def get_feedback(session_data):
    """
    Generate feedback text for a completed interview session.

    Args:
        session_data: A list of dicts, each expected to have "question" and
            "answer" keys.

    Returns:
        The decoded model output (prompt plus continuation) as a string.
    """
    input_text = "Session Feedback:\n"
    for qa in session_data:
        input_text += f"Question: {qa['question']}\nAnswer: {qa['answer']}\n"
    input_text += "\nScore:"  # plain string — the old f-string had no placeholders

    inputs = tokenizer.encode(input_text, return_tensors="pt", truncation=True, max_length=512)
    # Use max_new_tokens rather than max_length: the prompt is encoded with up
    # to 512 tokens, which already exceeded the old max_length=500 total-length
    # budget, so generate() failed outright on long sessions.
    outputs = model.generate(
        inputs,
        max_new_tokens=500,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,
    )
    feedback = tokenizer.decode(outputs[0], skip_special_tokens=True)  # Decode and store the output
    return feedback


# Gradio Interface Functions
def question_interface(response, history):
    """
    Gradio wrapper for question generation.

    Args:
        response: User's latest answer text.
        history: String representation of a Python list of Q&A dicts,
            e.g. "[{'question': 'Q1', 'answer': 'A1'}]".

    Returns:
        The generated next question, or an "Error: ..." message string.
    """
    try:
        # ast.literal_eval parses only Python literals; eval() here would
        # execute arbitrary code typed into a public web textbox.
        history = ast.literal_eval(history)
        return generate_question(response, history)
    except Exception as e:
        return f"Error: {str(e)}"

def feedback_interface(session_data):
    """
    Gradio wrapper for feedback generation.

    Args:
        session_data: String representation of a Python list of Q&A dicts,
            e.g. "[{'question': 'Q1', 'answer': 'A1'}]".

    Returns:
        The generated feedback, or an "Error: ..." message string.
    """
    try:
        # ast.literal_eval parses only Python literals; eval() here would
        # execute arbitrary code typed into a public web textbox.
        session_data = ast.literal_eval(session_data)
        return get_feedback(session_data)
    except Exception as e:
        return f"Error: {str(e)}"

# Gradio Interface Setup
# Question-generator tab: two text inputs (response + stringified history),
# one text output.
question_iface = gr.Interface(
    fn=question_interface,
    inputs=[
        gr.Textbox(label="User Response"),
        gr.Textbox(label="History (list of Q&A in string format)"),
    ],
    outputs="text",
    title="AI Interview Assistant: Question Generator",
    description="Provide a response and Q&A history to generate the next interview question.",
    api_name="generate_question"  # names the API endpoint (/generate_question) for programmatic clients
)

# Feedback tab: one text input (stringified session data), one text output.
feedback_iface = gr.Interface(
    fn=feedback_interface,
    inputs=gr.Textbox(label="Session Data (list of Q&A in string format)"),
    outputs="text",
    title="AI Interview Assistant: Feedback Generator",
    description="Provide session data to get feedback on your responses.",
    api_name="get_feedback"  # names the API endpoint (/get_feedback) for programmatic clients
)


# Combine the Interfaces
# Both tools are exposed as tabs of a single app.
iface = gr.TabbedInterface(
    interface_list=[question_iface, feedback_iface],
    tab_names=["Generate Question", "Get Feedback"]
)

# Launch the Gradio App
# 0.0.0.0 binds all interfaces; 7860 is the conventional Gradio/Spaces port.
if __name__ == "__main__":
    iface.launch(server_name="0.0.0.0", server_port=7860)