"""Idahun chatbot demo (English): a Gradio front-end that answers HIV-related
questions for people living with HIV, using RAG over a persisted LlamaIndex
vector store, with shortcuts for plain greetings and acknowledgments."""

import json
import logging
import os
import re
import sys
from datetime import datetime

os.environ["OPENAI_API_KEY"]  # fail fast: raises KeyError if the key is not set

from llama_index.llms.openai import OpenAI
from llama_index.core.schema import MetadataMode
import openai
from openai import OpenAI as OpenAIOG
from deep_translator import GoogleTranslator
from llama_index.core import VectorStoreIndex
from llama_index.core import StorageContext
from llama_index.core import load_index_from_storage
import gradio as gr

# LlamaIndex LLM (used by the query engine) and raw OpenAI client (used for
# the chat-completion calls below).
llm = OpenAI(temperature=0.0, model="gpt-3.5-turbo")
client = OpenAIOG()

# Load the persisted vector index and build retrieval entry points.
storage_context = StorageContext.from_defaults(persist_dir="single_parse")
index = load_index_from_storage(storage_context)
query_engine = index.as_query_engine(similarity_top_k=3, llm=llm)
retriever = index.as_retriever(similarity_top_k=3)

# Keyword lists that classify a user turn as acknowledgment / follow-up / greeting.
acknowledgment_keywords = ["thanks", "thank you", "thx", "ok", "okay", "great", "got it", "appreciate", "good", "makes sense"]
follow_up_keywords = ["but", "also", "and", "what", "how", "why", "when", "is", "?"]
greeting_keywords = ["hi", "hello", "hey", "how's it", "what's up", "yo", "howdy"]


def contains_exact_word_or_phrase(text: str, keywords: list[str]) -> bool:
    """Return True if *text* contains any keyword as a whole word/phrase.

    Keywords made of word characters are matched with ``\\b`` boundaries so
    "ok" does not fire inside "token". Pure-punctuation keywords (e.g. "?")
    are matched by plain substring containment — the original
    ``\\b\\?\\b`` pattern could never match a trailing question mark
    ("what?") because ``\\b`` needs a word character on both sides.
    """
    text = text.lower()
    for keyword in keywords:
        if re.search(r'\w', keyword):
            # Word-bearing keyword: require whole-word match.
            if re.search(r'\b' + re.escape(keyword) + r'\b', text):
                return True
        elif keyword in text:
            # Punctuation-only keyword ("?"): substring match.
            return True
    return False


def contains_greeting(question: str) -> bool:
    """True if the question contains a greeting keyword."""
    return contains_exact_word_or_phrase(question, greeting_keywords)


def contains_acknowledgment(question: str) -> bool:
    """True if the question contains an acknowledgment keyword."""
    return contains_exact_word_or_phrase(question, acknowledgment_keywords)


def contains_follow_up(question: str) -> bool:
    """True if the question contains a follow-up indicator (incl. "?")."""
    return contains_exact_word_or_phrase(question, follow_up_keywords)


def convert_to_date(date_str: str) -> datetime:
    """Parse a YYYYMMDD string into a datetime. (Currently unused helper.)"""
    return datetime.strptime(date_str, "%Y%m%d")


def _chat_reply(prompt: str) -> str:
    """Send a single user message to gpt-4o and return the text of the reply.

    Factored out because the greeting, acknowledgment, and RAG paths all made
    the identical chat-completion call.
    """
    completion = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {"role": "user", "content": prompt}
        ]
    )
    return completion.choices[0].message.content


def idahun(question: str, conversation_history: list[dict[str, str]]):
    """Answer one user turn.

    Args:
        question: the user's latest message.
        conversation_history: list of ``{"user": ..., "chatbot": ...}`` dicts
            (the original annotation said ``list[str]``, but the code indexes
            ``item["user"]`` — it is a list of dicts). Mutated in place.

    Returns:
        (reply, source0, source1, source2, conversation_history) — the three
        source texts are "RAG not run" when a greeting/acknowledgment
        shortcut answered without retrieval.
    """
    # Flatten prior turns into one context string for the prompt.
    context = " ".join([item["user"] + " " + item["chatbot"] for item in conversation_history])
    source0 = "RAG not run"
    source1 = "RAG not run"
    source2 = "RAG not run"

    # Pure greeting (no follow-up question attached): reply without RAG.
    if contains_greeting(question) and not contains_follow_up(question):
        greeting = (
            f" The user previously asked and answered the following: {context}. "
            f" The user just provided the following greeting: {question}. "
            "Please respond accordingly."
        )
        reply_to_user = _chat_reply(greeting)
        conversation_history.append({"user": question, "chatbot": reply_to_user})
        return reply_to_user, source0, source1, source2, conversation_history

    # Pure acknowledgment (no follow-up question attached): reply without RAG.
    if contains_acknowledgment(question) and not contains_follow_up(question):
        acknowledgment = (
            f" The user previously asked and answered the following: {context}. "
            f" The user just provided the following acknowledgement: {question}. "
            "Please respond accordingly in English."
        )
        reply_to_user = _chat_reply(acknowledgment)
        conversation_history.append({"user": question, "chatbot": reply_to_user})
        return reply_to_user, source0, source1, source2, conversation_history

    # Otherwise: full RAG. Retrieve up to three source chunks; pad so that a
    # short result list no longer raises IndexError (original indexed
    # sources[0..2] unconditionally).
    sources = retriever.retrieve(question)
    texts = [node.text for node in sources[:3]]
    texts += ["No source retrieved"] * (3 - len(texts))
    source0, source1, source2 = texts

    background = ("The person who asked the question is a person living with HIV."
                  " They are asking questions about HIV. Do not talk about anything that is not related to HIV. "
                  " Recognize that they already have HIV and do not suggest that they have to get tested"
                  " for HIV or take post-exposure prophylaxis, as that is not relevant, though their partners perhaps should."
                  " Do not suggest anything that is not relevant to someone who already has HIV."
                  " Do not mention in the response that the person is living with HIV.")

    # Combine into final prompt - user background, conversation history,
    # new question, retrieved sources.
    question_final = (
        f" The user previously asked and answered the following: {context}. "
        f" The user just asked the following question: {question}."
        f" Please use the following content to generate a response: {source0} {source1} {source2}."
        f" Please consider the following background information when generating a response: {background}."
        " Keep answers brief and limited to the question that was asked."
        " If they share a greeting, just greet them in return and ask if they have a question."
        " Do not change the subject or address anything the user didn't directly ask about."
        " If they respond with an acknowledgement, simply thank them."
        " Do not discuss anything other than HIV. If they ask a question that is not about HIV, respond that"
        " you are only able to discuss HIV."
        " Keep the response to under 50 words and use simple language. The person asking the question does not know technical terms."
    )

    reply_to_user = _chat_reply(question_final)
    conversation_history.append({"user": question, "chatbot": reply_to_user})
    return reply_to_user, source0, source1, source2, conversation_history


demo = gr.Interface(
    title="Idahun Chatbot Demo (English)",
    fn=idahun,
    inputs=["text", gr.State(value=[])],
    outputs=[
        gr.Textbox(label="Chatbot Response", type="text"),
        gr.Textbox(label="Source 1", max_lines=10, autoscroll=False, type="text"),
        gr.Textbox(label="Source 2", max_lines=10, autoscroll=False, type="text"),
        gr.Textbox(label="Source 3", max_lines=10, autoscroll=False, type="text"),
        gr.State(),
    ],
)

if __name__ == "__main__":
    demo.launch()