import os
import gradio as gr
from openai import OpenAI
from pypdf import PdfReader
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
# --- Credentials & API client (configured via Hugging Face Space secrets) ---
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
# Only propagate the key back into the environment when it is actually set:
# `os.environ[...] = None` raises TypeError and would crash the app at import.
if OPENAI_API_KEY is not None:
    os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
client = OpenAI(api_key=OPENAI_API_KEY)

# Login credentials for the Gradio UI, also stored in HF secrets.
username = os.getenv('username')
password = os.getenv('password')
# NOTE(review): APP_PASSWORD duplicates `password` and is never used below;
# kept so any external consumer of this module-level name keeps working.
APP_PASSWORD = os.getenv('password', 'default_password')
# Function to chunk the document
def chunk_text(text, chunk_size=1000, overlap=100):
chunks = []
start = 0
while start < len(text):
end = start + chunk_size
chunk = text[start:end]
chunks.append(chunk)
start = end - overlap
return chunks
# Function to find the most relevant chunks
def get_relevant_chunks(query, chunks, top_n=3):
    """Rank `chunks` by TF-IDF cosine similarity to `query`.

    Returns up to `top_n` chunks, most similar first; empty list when there
    is nothing to search.
    """
    if not chunks:
        return []
    # Vectorize corpus and query together so they share one vocabulary;
    # the query occupies the final row of the matrix.
    matrix = TfidfVectorizer().fit_transform(chunks + [query])
    scores = cosine_similarity(matrix[-1], matrix[:-1]).flatten()
    # argsort is ascending, so take the tail and reverse for best-first order.
    best = scores.argsort()[-top_n:][::-1]
    return [chunks[i] for i in best]
# Function to process multiple PDFs
def process_pdfs(pdf_files):
    """Extract and chunk the text of each PDF in `pdf_files`.

    Files that cannot be opened or parsed are reported to stdout and
    skipped, so one bad file does not abort the whole knowledge base.

    Args:
        pdf_files: Iterable of PDF file paths (or file-like objects
            accepted by pypdf's PdfReader).

    Returns:
        A single flat list of text chunks from all readable PDFs.
    """
    all_chunks = []
    for pdf_file in pdf_files:
        try:
            reader = PdfReader(pdf_file)
            # extract_text() may return None for pages with no extractable
            # text (e.g. scanned images); coalesce to '' so join() can't crash.
            full_text = ''.join(page.extract_text() or '' for page in reader.pages)
            all_chunks.extend(chunk_text(full_text))
        except Exception as e:
            # Best effort: report and continue with the remaining files.
            print(f"Error processing PDF {pdf_file}: {e}")
    return all_chunks
# Add the paths to your desired knowledge base PDFs
try:
    reference_documents = ["knowledge_base.pdf"]
    text_chunks = process_pdfs(reference_documents)
except Exception as e:
    # Narrowed from a bare `except:` so SystemExit / KeyboardInterrupt still
    # propagate; log the reason instead of failing silently.
    print(f"Failed to build knowledge base: {e}")
    text_chunks = []  # If PDF doesn't exist, use empty chunks

# Persona instructions (which historical figure to impersonate), from HF secrets.
instructions = os.getenv('INSTRUCTIONS')
def moderate_input(text):
    """Run input through OpenAI moderation API.

    Returns a tuple ``(allowed, categories)``: ``allowed`` is False only when
    the API flags the text, in which case ``categories`` carries the flagged
    category object; otherwise ``(True, None)``.
    """
    try:
        verdict = client.moderations.create(
            model="omni-moderation-latest",
            input=text,
        ).results[0]
    except Exception as e:
        # Fail safe: allow text if moderation API is down
        print(f"Moderation API error: {e}")
        return True, None
    if not verdict.flagged:
        return True, None
    return False, verdict.categories
def chat_with_assistant(message, history):
    """Produce the persona's reply to `message`.

    `history` is a list of ``[user, assistant]`` pairs from previous turns.
    Returns the model reply, a policy-refusal string when moderation flags
    the input, or an apology string when the completion call fails.
    """
    # Moderation gate: refuse before doing any retrieval or generation.
    allowed, categories = moderate_input(message)
    if not allowed:
        return "⚠️ Sorry, I can’t respond to that request because it violates the usage policy."

    # Retrieval: most relevant knowledge-base chunks for this message.
    context = "\n".join(get_relevant_chunks(message, text_chunks))

    # Persona prompt: who to impersonate (from secrets) + retrieved context.
    system_message = f"""
#Role
-You are an impersonator and an educator.
-Your role is to adopt the personality, style, psychology, ideas, background, and circumstances of a historical figure.
-Your goal is to help students understand the historical figure better through and engaging conversation.
#Information
Your assigned historical figure is stated in your instructions:
{instructions}
Use the following as context for your answers.
{context}
However, use it seamlessly as background knowledge for a lively discussion and combine it with your own information. Do not provide citations or adopt a Q&A or academic tone.
#Important
-Always speak in the first person ("I") as the historical figure you are to incarnate.
-Always use appropriate language.
-Refuse to answer inappropriate questions or questions unrelated to your role and historical figure.
#Critical
-Important: Your knowledge of the world ends at the time of the death of your historical figure.
-Keep your responses concise and to the point. Avoid repetitions and always end on a period "." token
"""

    # Rebuild the conversation for the API: system prompt, prior turns,
    # then the new user message.
    messages = [{"role": "system", "content": system_message}]
    for human_msg, ai_msg in history:
        if human_msg:
            messages.append({"role": "user", "content": human_msg})
        if ai_msg:
            messages.append({"role": "assistant", "content": ai_msg})
    messages.append({"role": "user", "content": message})

    try:
        completion = client.chat.completions.create(
            model="gpt-4.1-mini",
            messages=messages,
            max_tokens=300,
        )
    except Exception as e:
        return f"I apologize, but I'm having trouble responding right now. Error: {str(e)}"
    return completion.choices[0].message.content.strip()
# CSS for a blue-themed style: light-blue page, white panels, navy accents.
isp_theme = gr.themes.Default().set(
    # Page and panel surfaces
    body_background_fill="#E6F3FF",
    block_background_fill="#FFFFFF",
    input_background_fill="#FFFFFF",
    block_border_width="1px",
    block_border_color="#0066CC",
    block_title_text_color="#003366",
    block_label_background_fill="#B8D8FF",
    # Primary buttons
    button_primary_background_fill="#0066CC",
    button_primary_background_fill_hover="#0052A3",
    button_primary_text_color="#FFFFFF",
    # Secondary buttons
    button_secondary_background_fill="#B8D8FF",
    button_secondary_background_fill_hover="#99C2FF",
    button_secondary_text_color="#003366",
)
custom_css = """
#logo-img {
display: block;
margin: 0 auto;
width: 150px;
height: auto;
padding-bottom: 20px;
}
#disclaimer-footer {
width: 100%;
background-color: #B8D8FF;
color: #003366;
text-align: center;
padding: 10px 0;
font-size: 14px;
border-top: 1px solid #0066CC;
margin-top: 20px;
}
.container {
max-width: 1200px;
margin: 0 auto;
padding: 10px;
}
.title {
color: #003366;
margin-bottom: 10px;
text-align: center;
}
.button-row {
display: flex;
gap: 10px;
justify-content: center;
margin-bottom: 15px;
}
"""
# Presentation settings pulled from environment variables (HF Space config).
assistant_avatar = os.environ.get('AVATAR')                 # chatbot avatar image
assistant_title = os.environ.get('TITLE', 'AI Assistant')   # app title, with default
assistant_logo = os.environ.get('LOGO')                     # optional logo image
# ---------------------------------------------------------------------------
# Application UI.
# NOTE(review): the original source of this section was garbled (an
# unterminated gr.Markdown f-string and the remainder collapsed onto one
# line).  The component layout and the `login` callback are reconstructed
# from the surviving callback bodies and event-handler wiring; confirm
# against the deployed Space before relying on the exact layout.
# ---------------------------------------------------------------------------
# Check if credentials are set
if not username or not password:
    # Misconfigured Space: show an error page instead of the app.
    with gr.Blocks(theme=isp_theme, css=custom_css) as demo:
        gr.Markdown("# Configuration Error")
        gr.Markdown("Username and password are not configured in Hugging Face secrets.")
        gr.Markdown("Please set 'username' and 'password' in your Space secrets.")
    demo.launch()
else:
    # Main interface with login
    with gr.Blocks(theme=isp_theme, css=custom_css) as demo:
        # ---- Login screen (visible on startup) ----
        with gr.Row(visible=True) as login_screen:
            with gr.Column():
                if assistant_logo:
                    gr.HTML(f'<img src="{assistant_logo}" id="logo-img">')
                gr.Markdown(f"<h1 class='title'>{assistant_title}</h1>")
                username_input = gr.Textbox(label="Username")
                password_input = gr.Textbox(label="Password", type="password")
                login_button = gr.Button("Login", variant="primary")
                error_message = gr.Markdown("")

        # ---- Main chat app (revealed after a successful login) ----
        with gr.Column(visible=False) as main_app:
            if assistant_logo:
                gr.HTML(f'<img src="{assistant_logo}" id="logo-img">')
            gr.Markdown(f"<h1 class='title'>{assistant_title}</h1>")
            chatbot = gr.Chatbot(avatar_images=(None, assistant_avatar))
            msg = gr.Textbox(label="Message", placeholder="Type your message here...")
            with gr.Row(elem_classes="button-row"):
                submit = gr.Button("Send", variant="primary")
                undo = gr.Button("Undo", variant="secondary")
                logout_button = gr.Button("Logout", variant="secondary")

        def login(username_attempt, password_attempt):
            """Check entered credentials against the configured secrets."""
            if username_attempt == username and password_attempt == password:
                # Hide login, reveal the app, clear any prior error text.
                return gr.update(visible=False), gr.update(visible=True), ""
            return (
                gr.update(visible=True),
                gr.update(visible=False),
                "Invalid credentials. Please try again.",
            )

        def logout():
            """Return to the login screen and wipe the session state."""
            return (
                gr.update(visible=True),   # Show login screen
                gr.update(visible=False),  # Hide main app
                "",                        # Clear error message
                [],                        # Clear chat history
                ""                         # Clear message input
            )

        def user(user_message, history):
            """Append the user's turn (assistant slot pending) and clear the box."""
            return "", history + [[user_message, None]]

        def bot(history):
            """Fill in the assistant's reply for the newest user turn."""
            if history and history[-1][0]:
                bot_message = chat_with_assistant(history[-1][0], history[:-1])
                history[-1][1] = bot_message
            return history

        def delete_previous(history):
            """Drop the most recent exchange, if any."""
            if len(history) > 0:
                return history[:-1]
            return history

        # Login event handlers (button click, or Enter in the password box).
        login_button.click(
            login,
            inputs=[username_input, password_input],
            outputs=[login_screen, main_app, error_message]
        )
        password_input.submit(
            login,
            inputs=[username_input, password_input],
            outputs=[login_screen, main_app, error_message]
        )
        # Logout event handler
        logout_button.click(
            logout,
            outputs=[login_screen, main_app, error_message, chatbot, msg]
        )
        # Chat event handlers: record the user turn, then generate the reply.
        msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
            bot, chatbot, chatbot
        )
        submit.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(
            bot, chatbot, chatbot
        )
        undo.click(delete_previous, chatbot, chatbot)

    demo.launch()