# Hugging Face Space: "AI Yogi" — Advaita Vedanta chatbot (Gradio + OpenAI).
# (Space status at scrape time: runtime error.)
import openai
import gradio as gr
import os

# Configure the OpenAI client from the environment.  A missing key raises
# KeyError here, failing fast at startup rather than at the first request.
openai.api_key = os.environ["OPENAI_API_KEY"]

# System prompt for the bot.  Each fragment ends with a space/period so the
# concatenated string reads as separate sentences (the original prompt ran
# fragments together, e.g. "disciple.Try" and "respondingLimit").
INSTRUCTIONS = (
    "I want you to respond to users strictly with knowledge from Nisargadatta Maharaj, "
    "Ramana Maharishi, Swami Vivekananda, Ashtavakra, Adi Shankaracharya and Sage Vasishta "
    "among other great Advaita Vedanta Masters. "
    "I want you to answer users with questions, as if you were their therapist, in a dialogue "
    "between student and teacher, guru and disciple. "
    "Try not to provide or give away full answers in the beginning. Have a conversation. "
    "Ask questions. Inspire the user to get to the answer of his/her own question. "
    "Act as one of these great Vedanta Masters when responding. "
    "Limit your answers to no more than 100 words."
)

# Sampling parameters for the ChatCompletion call.
TEMPERATURE = 0.5
MAX_TOKENS = 500
FREQUENCY_PENALTY = 0
PRESENCE_PENALTY = 0.6

# Limits how many prior question/answer pairs we include in the prompt.
MAX_CONTEXT_QUESTIONS = 10
def get_response(instructions, previous_questions_and_answers, new_question):
    """Ask the chat model for a reply.

    Args:
        instructions: System prompt that determines how the bot behaves.
        previous_questions_and_answers: Chat history as (question, answer) pairs.
        new_question: The new user question to ask the bot.

    Returns:
        The assistant's reply text.
    """
    # Replay only the most recent exchanges to keep the prompt bounded.
    recent_history = previous_questions_and_answers[-MAX_CONTEXT_QUESTIONS:]

    # System prompt first, then alternating user/assistant turns, then the
    # new question last.
    messages = [{"role": "system", "content": instructions}]
    for asked, answered in recent_history:
        messages.append({"role": "user", "content": asked})
        messages.append({"role": "assistant", "content": answered})
    messages.append({"role": "user", "content": new_question})

    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
        temperature=TEMPERATURE,
        max_tokens=MAX_TOKENS,
        top_p=1,
        frequency_penalty=FREQUENCY_PENALTY,
        presence_penalty=PRESENCE_PENALTY,
    )
    return completion.choices[0].message.content
def get_moderation(question):
    """Check whether the question is safe to send to the model.

    Parameters:
        question (str): The question to check.

    Returns:
        A list of human-readable error messages, one per flagged category,
        if the question is not safe; otherwise None.
    """
    # Map of moderation category -> message shown to the user when flagged.
    category_messages = {
        "hate": "Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste.",
        "hate/threatening": "Hateful content that also includes violence or serious harm towards the targeted group.",
        "self-harm": "Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders.",
        "sexual": "Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness).",
        "sexual/minors": "Sexual content that includes an individual who is under 18 years old.",
        "violence": "Content that promotes or glorifies violence or celebrates the suffering or humiliation of others.",
        "violence/graphic": "Violent content that depicts death, violence, or serious physical injury in extreme graphic detail.",
    }

    moderation_result = openai.Moderation.create(input=question).results[0]
    if not moderation_result.flagged:
        return None

    # Collect a message for every category the endpoint flagged.
    return [
        message
        for category, message in category_messages.items()
        if moderation_result.categories[category]
    ]
# Original CLI entry point, kept for reference (superseded by the Gradio UI below).
# def main():
#     os.system("cls" if os.name == "nt" else "clear")
#     # keep track of previous questions and answers
#     previous_questions_and_answers = []
#     while True:
#         # ask the user for their question
#         new_question = input(
#             Fore.GREEN + Style.BRIGHT + "wwww?: " + Style.RESET_ALL
#         )
#         # check the question is safe
#         errors = get_moderation(new_question)
#         if errors:
#             print(
#                 Fore.RED
#                 + Style.BRIGHT
#                 + "Sorry, your question didn't pass the moderation check:"
#             )
#             for error in errors:
#                 print(error)
#             print(Style.RESET_ALL)
#             continue
#         response = get_response(INSTRUCTIONS, previous_questions_and_answers, new_question)
#         # add the new question and answer to the list of previous questions and answers
#         previous_questions_and_answers.append((new_question, response))
def delete_chat_history(previous_questions_and_answers):
    """Reset the conversation.

    Args:
        previous_questions_and_answers: Chat history (list of (q, a) pairs),
            or None when the Gradio state has never been populated.

    Returns:
        (history, ""): the emptied history for the state output and an empty
        string to blank the chatbot display.
    """
    # gr.State() starts out as None, so pressing CLEAR before the first SEND
    # used to crash with AttributeError on .clear(); guard before mutating.
    if previous_questions_and_answers is None:
        return [], ""
    previous_questions_and_answers.clear()
    return previous_questions_and_answers, ""
def chatgpt_clone(input, previous_questions_and_answers):
    """Gradio SEND handler: moderate the question, query the model, update history.

    Args:
        input: The user's new question (value of the message textbox).
            (Shadows the builtin, but the name is kept for callers.)
        previous_questions_and_answers: Chat history, or None on first call.

    Returns:
        (history, history): the same list twice, matching the event's
        outputs=[chatbot, state].
    """
    previous_questions_and_answers = previous_questions_and_answers or []

    # Refuse flagged input.  The original returned a bare string here, which
    # does not match outputs=[chatbot, state] and broke the UI; instead we
    # surface the refusal in the chat window and keep the state consistent.
    moderation_errors = get_moderation(input)
    if moderation_errors is not None:
        refusal = (
            "Sorry, your question didn't pass the moderation check:\n"
            + "\n".join(moderation_errors)
        )
        previous_questions_and_answers.append((input, refusal))
        return previous_questions_and_answers, previous_questions_and_answers

    # Pass only the new question: get_response already replays the history as
    # chat messages, so the old space-joined transcript double-counted context.
    output = get_response(INSTRUCTIONS, previous_questions_and_answers, input)
    previous_questions_and_answers.append((input, output))
    return previous_questions_and_answers, previous_questions_and_answers
# ---- Gradio UI -----------------------------------------------------------
block = gr.Blocks(
    theme=gr.themes.Monochrome(secondary_hue="neutral").set(
        button_primary_background_fill="*primary_400",
        button_primary_background_fill_hover="*primary_300",
    ),
    css="footer {visibility: hidden}",
)
with block:
    # gr.Markdown("""<h1><center>_/\_ AI YOGI _/\_ </center></h1>""")
    chatbot = gr.Chatbot(label="Ai Yogi:")
    message = gr.Textbox(label="Namaste! How may I serve you?", placeholder="What concerns you now?")
    # message.change(fn=lambda value: gr.update(value=""))
    state = gr.State()

    submit = gr.Button("SEND")
    submit.click(chatgpt_clone, inputs=[message, state], outputs=[chatbot, state])

    clear = gr.Button("CLEAR")
    clear.click(delete_chat_history, inputs=[state], outputs=[chatbot, state])

    # These callbacks are registered with inputs=[], so Gradio calls them with
    # zero arguments — the original `lambda x: ...` raised TypeError whenever
    # the event fired.  They must be zero-argument lambdas.
    clear.click(
        lambda: gr.update(value='', placeholder="What concerns you now?", label="Namaste! How may I serve you?"),
        [],
        [message],
    )
    submit.click(
        lambda: gr.update(value='', placeholder="", label="You may continue with the conversation below"),
        [],
        [message],
    )
    submit.click(lambda: gr.update(label='Ai Yogi:'), [], [chatbot])
    clear.click(lambda: gr.update(label='Ai Yogi:'), [], [chatbot])
    message.submit(lambda: gr.update(value='', placeholder="", label=""), [], [message])

block.launch(show_api=False)