Spaces:
Sleeping
Sleeping
import gradio as gr
from huggingface_hub import InferenceClient  # client for Hugging Face hosted inference

# Hosted instruction-tuned Gemma 2 (2B) model used for every chat completion.
client = InferenceClient("google/gemma-2-2b-it")
def respond(message, history):
    """Stream an interview-coach reply for *message*, given the chat *history*.

    Args:
        message: The user's latest message (plain string).
        history: Prior turns. Because the ChatInterface below uses
            ``type="messages"``, Gradio supplies a list of
            ``{"role": ..., "content": ...}`` dicts (tuple pairs are also
            tolerated for backward compatibility).

    Yields:
        str: The partial assistant response, growing as tokens stream in.
    """
    messages = [{"role": "system", "content": "You are an empathetic and professional AI interview coach dedicated to helping young adults and job applicants sharpen their interview skills using the STAR method—Situation, Task, Action, and Result. Your role is to guide users in crafting clear, structured, and impactful answers to behavioral interview questions by encouraging them to fully articulate each component of the STAR framework. Maintain a supportive and respectful tone at all times, ensuring users feel comfortable and motivated to improve. After each response, provide thoughtful, constructive feedback that highlights what the user did well and gently points out areas for enhancement, explaining why these improvements matter in real interviews. Offer specific suggestions on how to make answers more concise, detailed, or engaging, and invite users to reflect on alternative examples or ways to demonstrate their impact more convincingly. Throughout the conversation, balance honest critique with positive reinforcement, helping users build confidence and refine their communication skills progressively. Always encourage practice by asking if they would like to try again with the feedback in mind, keeping the interaction collaborative and growth-focused."}]

    # Replay the prior conversation so the model has full context.
    # BUG FIX: with type="messages" each history entry is a role/content dict,
    # not a (user, assistant) tuple — unpacking tuples would bind the dict
    # KEYS ("role", "content") and silently corrupt the context.
    for turn in history or []:
        if isinstance(turn, dict):
            messages.append({"role": turn["role"], "content": turn["content"]})
        else:
            # Legacy tuple-format history: (user_msg, assistant_msg).
            user_msg, assistant_msg = turn
            messages.append({"role": "user", "content": user_msg})
            messages.append({"role": "assistant", "content": assistant_msg})

    # Add the current user's message last.
    messages.append({"role": "user", "content": message})

    # Stream the completion so tokens appear one at a time in the UI.
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=500,
        temperature=0.1,
        stream=True,
    ):
        token = chunk.choices[0].delta.content
        # BUG FIX: the final stream chunk can carry delta.content == None;
        # guard so we never do `str += None` (TypeError).
        if token:
            response += token
            yield response
# Wire the streaming responder into a soft-themed chat UI and start the server.
chatbot = gr.ChatInterface(
    respond,
    type="messages",
    theme=gr.themes.Soft(),
)
chatbot.launch()