import gradio as gr
import openai
import os
import tiktoken
import re
import time
import pandas as pd

# Set your OpenAI API key
openai.api_key = os.getenv('OPENAI_API_KEY')
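
# Note: this app uses the legacy module-level openai interface
# (openai.ChatCompletion), which assumes openai<1.0; with openai>=1.0 the
# calls below would need to be ported to the newer OpenAI() client API.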

# Pricing constants
INPUT_COST_PER_TOKEN = 0.50 / 1_000_000
OUTPUT_COST_PER_TOKEN = 1.50 / 1_000_000


def print_like_dislike(x: gr.LikeData):
    print(x.index, x.value, x.liked)


def add_text(history, text):
    history.append((text, "**That's cool!**"))
    return history


def add_file(history, file):
    # Display the name of the uploaded file in the chat history
    file_info = (f"Uploaded file: {file.name}", "")
    history.append(file_info)
    return history
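

# Approximate prompt token counting. The per-message constants below follow the
# OpenAI cookbook heuristic for gpt-3.5-turbo-0301 (4 tokens of overhead per
# message, 2 tokens to prime the assistant reply); exact counts for newer model
# snapshots may differ slightly.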
def num_tokens_from_messages(messages, model="gpt-3.5-turbo"):
    encoding = tiktoken.encoding_for_model(model)
    num_tokens = 0
    for message in messages:
        num_tokens += 4  # every message follows <im_start>{role/name}\n{content}<im_end>\n
        for key, value in message.items():
            num_tokens += len(encoding.encode(value))
            if key == "name":
                num_tokens -= 1  # if there's a name, the role token is omitted
    num_tokens += 2  # every reply is primed with <im_start>assistant
    return num_tokens


def initialize_chat(initial_question):
    # Initialize the chat with the user-provided initial question and the first response.
    chat_history = [(initial_question, None)]
    response, follow_up_questions, token_info = generate_response(initial_question, 0)
    chat_history.append((None, response))
    # Extract follow-up questions as examples
    follow_up_questions_formatted = [q.strip() for q in follow_up_questions.split('\n') if q.strip()]
    examples_state = [[q] for q in follow_up_questions_formatted]
    return chat_history, follow_up_questions, token_info, examples_state
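

# Note: generate_response makes two sequential ChatCompletion calls per turn --
# one for the answer and one asking the model to suggest three follow-up
# questions -- so every turn pays for both requests; the token and cost figures
# it reports cover both.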
def generate_response(prompt, token_count=0):
    # token_count is accepted for interface compatibility but is not used in the
    # cost calculation below.
    print(f"Received prompt: {prompt}")
    messages = [
        {"role": "system", "content": "You are a friendly and helpful chatbot."},
        {"role": "user", "content": prompt}
    ]
    try:
        input_tokens = num_tokens_from_messages(messages, model="gpt-3.5-turbo")
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=messages,
            max_tokens=150,
            temperature=0.7,
        )
        output_text = response.choices[0].message['content'].strip()
        output_tokens = response.usage['completion_tokens']

        follow_up_prompt = f"Based on the following response, suggest three follow-up questions that a young person should ask in first person: {output_text}"
        follow_up_messages = [
            {"role": "system", "content": "You are a friendly and helpful chatbot."},
            {"role": "user", "content": follow_up_prompt}
        ]
        follow_up_response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=follow_up_messages,
            max_tokens=100,
            temperature=0.7,
        )
        # The model's suggestions, one question per line
        follow_up_questions = follow_up_response.choices[0].message['content'].strip()
        print(f"Follow up questions: {follow_up_questions}")

        topics_str = "Topic analysis not available"

        # Calculate the total tokens used
        total_input_tokens = input_tokens + num_tokens_from_messages(follow_up_messages, model="gpt-3.5-turbo")
        total_output_tokens = output_tokens + follow_up_response.usage['completion_tokens']

        # Calculate cost
        input_cost = total_input_tokens * INPUT_COST_PER_TOKEN
        output_cost = total_output_tokens * OUTPUT_COST_PER_TOKEN
        total_cost = input_cost + output_cost

        # Assemble the response text and the token usage summary
        new_response = output_text + "\n\nTopics: " + topics_str
        token_info = f"### Token Usage:\n\n* Input Tokens: {total_input_tokens}\n* Output Tokens: {total_output_tokens}\n* Total Cost: ${total_cost:.4f}"
    except Exception as e:
        new_response = f"Error generating response: {e}"
        follow_up_questions = ""  # keep the same (string) type as the success path
        token_info = "### Token Usage:\n\n* Input Tokens: 0\n* Output Tokens: 0\n* Total Cost: $0.0000"
    return new_response, follow_up_questions, token_info


def process_response(prompt, chat_history, token_count, examples_state):
    response, new_follow_up_questions, token_info = generate_response(prompt, token_count)
    chat_history.append((prompt, response))
    # Split and clean the new follow-up questions, keeping at most three non-empty lines
    new_follow_up_questions_formatted = [q.strip() for q in new_follow_up_questions.split('\n') if q.strip()][:3]
    # Update the examples state with the new follow-up questions
    examples_state = [[q] for q in new_follow_up_questions_formatted]
    follow_up_questions_md = "\n".join(new_follow_up_questions_formatted)
    return chat_history, token_info, follow_up_questions_md, examples_state


# CSS for the phone layout and background
css = """
#chat-container {
    max-width: 400px !important;
    margin: auto;
    border: 1px solid #ccc;
    border-radius: 20px;
    overflow: hidden;
    background: url('https://path-to-your-phone-background-image.png') no-repeat center center;
    background-size: cover;
    padding: 20px;
    box-sizing: border-box;
    display: flex;
    flex-direction: column;
}
#chatbot {
    height: calc(100% - 50px);
    overflow-y: auto;
    background: transparent;
    width: 100%;
}
#component-10 {
    font-size: 7px;
    padding: 5px;
    margin: 5px 0;
    width: 100%;
}
#example-container .gr-examples {
    font-size: 0.9em;
    padding: 5px;
    margin: 5px 0;
}
"""

# Initialize the chat history and suggested questions
#chat_history, follow_up_questions, initial_token_info = initialize_chat("I'm 14 years old female and want to become a graphic designer. I'm living in Uttar Pradesh in India. How can I start?")

# Initial example questions from the initialization
questions = [
    ["I'm a 14-year-old girl living in Uttar Pradesh. Show me where I can get the HPV vaccine and give me more information about HPV."],
    ["I'm a 14-year-old girl and want to become a graphic designer. I'm living in Uttar Pradesh in India. How can I start?"],
    ["I'm a 15-year-old boy living in New Delhi. How can I learn more about climate change, and what can I do myself?"],
]


with gr.Blocks(css=css) as demo:
    examples_state = gr.State([])
    chat_history = gr.State([])
    token_info = gr.State("")
    follow_up_questions_md = gr.State("")

    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown(
                """
# Child-safe chatbot project!
This chatbot keeps conversations on track by asking users to pick their next question from a curated list of suggested follow-up questions instead of typing free-form input. Constraining the input this way reduces the risk of drifting into off-topic dialogue, a common pitfall of conventional chatbot systems.
                """
            )
            token_info_display = gr.Markdown(
                value="",
                elem_id="token-info"
            )
            follow_up_questions_display = gr.Markdown(
                value="",
                elem_id="follow-up-questions",
                label="Follow up Questions"
            )
            initial_question_input = gr.Textbox(
                placeholder="Type your initial question here...",
                label="Initial Question"
            )
            initialize_button = gr.Button("Initialize Chat")
            question_examples = gr.Examples(
                examples=questions,
                inputs=initial_question_input,
                label="Initial Questions"
            )

        with gr.Column(scale=1, elem_id="chat-container"):
            chatbot = gr.Chatbot(
                value=[],
                elem_id="chatbot",
                bubble_full_width=False,
                label="Safe Chatbot v1"
            )
            with gr.Row():
                txt = gr.Textbox(scale=4, show_label=False, placeholder="Select question...", container=False, interactive=False)
                btn = gr.Button("Submit")
            btn.click(
                fn=process_response,
                inputs=[txt, chat_history, gr.State(0), examples_state],
                outputs=[chatbot, token_info_display, follow_up_questions_display, examples_state]
            )
            examples_component = gr.Examples(
                examples=examples_state.value,
                inputs=[txt],
                label="Questions"
            )
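            # Note: gr.Examples reads examples_state.value once when the UI is
            # built, so this component starts empty and is not refreshed
            # automatically when the examples_state State is updated later.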

    chatbot.like(print_like_dislike, None, None)

    initialize_button.click(
        fn=initialize_chat,
        inputs=[initial_question_input],
        outputs=[chat_history, follow_up_questions_display, token_info_display, examples_state]
    ).then(
        fn=lambda chat_history: chat_history,
        inputs=[chat_history],
        outputs=[chatbot]
    )

if __name__ == "__main__":
    demo.launch(share=False)