| | import os |
| | import gradio as gr |
| | from openai import OpenAI |
| | from pypdf import PdfReader |
| | from sklearn.feature_extraction.text import TfidfVectorizer |
| | from sklearn.metrics.pairwise import cosine_similarity |
| |
|
| | |
# --- OpenAI client setup ---
# Read the API key from the environment. Guard the os.environ write:
# os.environ values must be str, so assigning None when the secret is
# missing would raise TypeError at import time and kill the whole app.
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
if OPENAI_API_KEY:
    os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
else:
    print("Warning: OPENAI_API_KEY is not set; OpenAI API calls will fail.")
client = OpenAI(api_key=OPENAI_API_KEY)
| |
|
| | |
# Login credentials, injected via environment secrets (e.g. HF Space secrets).
username = os.getenv('username')
password = os.getenv('password')
# Same secret with a fallback default; derived from `password` so the
# environment is only consulted once per key.
APP_PASSWORD = password if password is not None else 'default_password'
| |
|
| | |
def chunk_text(text, chunk_size=1000, overlap=100):
    """Split *text* into chunks of at most *chunk_size* characters.

    Consecutive chunks share *overlap* characters so sentences cut at a
    boundary still appear whole in at least one chunk.

    Args:
        text: String to split.
        chunk_size: Maximum length of each chunk.
        overlap: Characters shared between neighbouring chunks.

    Returns:
        List of string chunks; empty list for empty input.

    Raises:
        ValueError: If ``overlap >= chunk_size`` — the start index would
            never advance and the original loop would spin forever.
    """
    if overlap >= chunk_size:
        raise ValueError("overlap must be smaller than chunk_size")
    chunks = []
    start = 0
    while start < len(text):
        end = start + chunk_size
        chunks.append(text[start:end])
        # Advance by chunk_size - overlap (> 0 thanks to the guard above).
        start = end - overlap
    return chunks
| |
|
| | |
def get_relevant_chunks(query, chunks, top_n=3):
    """Return up to *top_n* chunks ranked by TF-IDF cosine similarity to *query*.

    Args:
        query: The user's message.
        chunks: Candidate text chunks to rank.
        top_n: Maximum number of chunks to return.

    Returns:
        The most similar chunks, best first; empty list when there are no
        chunks or the corpus produces no usable vocabulary.
    """
    if not chunks:
        return []
    vectorizer = TfidfVectorizer()
    try:
        # Fit on chunks + query together so they share one vocabulary.
        tfidf_matrix = vectorizer.fit_transform(chunks + [query])
    except ValueError:
        # fit_transform raises ValueError on an empty vocabulary (input is
        # only stop words / punctuation); treat as "no relevant context"
        # instead of crashing the chat handler.
        return []
    # Last row is the query; compare it against every chunk row.
    cosine_similarities = cosine_similarity(tfidf_matrix[-1], tfidf_matrix[:-1]).flatten()
    # argsort is ascending: take the last top_n indices, reversed -> best first.
    relevant_indices = cosine_similarities.argsort()[-top_n:][::-1]
    return [chunks[i] for i in relevant_indices]
| |
|
| | |
def process_pdfs(pdf_files):
    """Extract text from each PDF and return the combined list of chunks.

    Files that fail to parse are skipped with a printed warning so one bad
    document does not abort the whole batch.

    Args:
        pdf_files: Iterable of paths / file-like objects accepted by PdfReader.

    Returns:
        List of text chunks from all successfully parsed PDFs.
    """
    all_chunks = []
    for pdf_file in pdf_files:
        try:
            reader = PdfReader(pdf_file)
            # extract_text() can yield None for image-only pages (older pypdf
            # versions); coalesce to "" so ''.join does not raise TypeError.
            full_text = ''.join((page.extract_text() or '') for page in reader.pages)
            chunks = chunk_text(full_text)
            all_chunks.extend(chunks)
        except Exception as e:
            print(f"Error processing PDF {pdf_file}: {e}")
    return all_chunks
| |
|
| | |
# Build the retrieval corpus at startup. The app still runs (without RAG
# context) when the knowledge base is missing or unreadable.
try:
    reference_documents = ["knowledge_base.pdf"]
    text_chunks = process_pdfs(reference_documents)
except Exception as e:
    # Narrowed from a bare `except:`, which would also swallow
    # SystemExit/KeyboardInterrupt; log the cause instead of failing silently.
    print(f"Error loading knowledge base: {e}")
    text_chunks = []

# Persona instructions (which historical figure to play) come from a secret.
instructions = os.getenv('INSTRUCTIONS')
| |
|
def moderate_input(text):
    """Run input through OpenAI moderation API"""
    try:
        verdict = client.moderations.create(
            model="omni-moderation-latest",
            input=text,
        ).results[0]
    except Exception as e:
        # Fail open: when moderation itself errors, let the message through
        # rather than blocking every user on an API outage.
        print(f"Moderation API error: {e}")
        return True, None
    # (allowed, flagged_categories): categories only populated when flagged.
    return (False, verdict.categories) if verdict.flagged else (True, None)
| |
|
def chat_with_assistant(message, history):
    """Generate the assistant's reply to *message* given the chat *history*.

    Pipeline: moderate the input, retrieve knowledge-base chunks relevant to
    the message, build the persona system prompt, replay prior turns, then
    call the chat-completions API.

    Args:
        message: The user's new message.
        history: List of (user_msg, assistant_msg) pairs from the Chatbot.

    Returns:
        The assistant's reply text, or an apology string when the message is
        flagged by moderation or the API call fails.
    """
    # Refuse content flagged by the moderation endpoint.
    allowed, categories = moderate_input(message)
    if not allowed:
        return "⚠️ Sorry, I can’t respond to that request because it violates the usage policy."

    # RAG: pull the chunks most similar to the user's message.
    relevant_chunks = get_relevant_chunks(message, text_chunks)
    context = "\n".join(relevant_chunks)

    # Persona prompt; {instructions} names the historical figure,
    # {context} injects the retrieved background material.
    # (Fixed typo: "through and engaging" -> "through an engaging".)
    system_message = f"""
#Role
-You are an impersonator and an educator.
-Your role is to adopt the personality, style, psychology, ideas, background, and circumstances of a historical figure.
-Your goal is to help students understand the historical figure better through an engaging conversation.

#Information
Your assigned historical figure is stated in your instructions:
{instructions}

Use the following as context for your answers.
{context}
However, use it seamlessly as background knowledge for a lively discussion and combine it with your own information. Do not provide citations or adopt a Q&A or academic tone.
#Important
-Always speak in the first person ("I") as the historical figure you are to incarnate.
-Always use appropriate language.
-Refuse to answer inappropriate questions or questions unrelated to your role and historical figure.
#Critical
-Important: Your knowledge of the world ends at the time of the death of your historical figure.
-Keep your responses concise and to the point. Avoid repetitions and always end on a period "." token
"""

    messages = [{"role": "system", "content": system_message}]

    # Replay prior turns in (user, assistant) pair format.
    for human_msg, ai_msg in history:
        if human_msg:
            messages.append({"role": "user", "content": human_msg})
        if ai_msg:
            messages.append({"role": "assistant", "content": ai_msg})

    messages.append({"role": "user", "content": message})

    try:
        response = client.chat.completions.create(
            model="gpt-4.1-mini",
            messages=messages,
            max_tokens=300,
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        # Surface a friendly message instead of crashing the Gradio handler.
        return f"I apologize, but I'm having trouble responding right now. Error: {str(e)}"
| |
|
| | |
# Light-blue colour scheme applied on top of Gradio's default theme.
# Collected in a dict so the palette reads as one unit and can be tweaked
# in a single place.
_THEME_OVERRIDES = dict(
    body_background_fill="#E6F3FF",
    block_background_fill="#FFFFFF",
    block_title_text_color="#003366",
    block_label_background_fill="#B8D8FF",
    input_background_fill="#FFFFFF",
    button_primary_background_fill="#0066CC",
    button_primary_background_fill_hover="#0052A3",
    button_primary_text_color="#FFFFFF",
    button_secondary_background_fill="#B8D8FF",
    button_secondary_background_fill_hover="#99C2FF",
    button_secondary_text_color="#003366",
    block_border_width="1px",
    block_border_color="#0066CC",
)
isp_theme = gr.themes.Default().set(**_THEME_OVERRIDES)
| |
|
# Extra CSS injected into the Blocks app: centred logo, footer disclaimer
# banner, page container width, title colour, and the chat button row layout.
# (Runtime string — keep byte-identical; element ids/classes are referenced
# by the gr.HTML / elem_classes usages below.)
custom_css = """
#logo-img {
    display: block;
    margin: 0 auto;
    width: 150px;
    height: auto;
    padding-bottom: 20px;
}
#disclaimer-footer {
    width: 100%;
    background-color: #B8D8FF;
    color: #003366;
    text-align: center;
    padding: 10px 0;
    font-size: 14px;
    border-top: 1px solid #0066CC;
    margin-top: 20px;
}
.container {
    max-width: 1200px;
    margin: 0 auto;
    padding: 10px;
}
.title {
    color: #003366;
    margin-bottom: 10px;
    text-align: center;
}
.button-row {
    display: flex;
    gap: 10px;
    justify-content: center;
    margin-bottom: 15px;
}
"""
| |
|
| | |
# Optional UI branding pulled from environment secrets.
assistant_avatar = os.getenv('AVATAR')  # avatar image for the chatbot bubble (None = Gradio default)
assistant_title = os.getenv('TITLE', 'AI Assistant')  # page heading / login title
assistant_logo = os.getenv('LOGO')  # logo image URL shown above login and chat
| |
|
| | |
# Entry point: if the login secrets are missing, show a configuration-error
# page; otherwise build the two-screen UI (login row + hidden chat row) and
# toggle visibility between them on login/logout.
if not username or not password:
    with gr.Blocks(theme=isp_theme, css=custom_css) as demo:
        gr.Markdown("# Configuration Error")
        gr.Markdown("Username and password are not configured in Hugging Face secrets.")
        gr.Markdown("Please set 'username' and 'password' in your Space secrets.")

    demo.launch()
else:
    with gr.Blocks(theme=isp_theme, css=custom_css) as demo:
        # --- Login screen (visible on load) ---
        with gr.Row(visible=True) as login_screen:
            with gr.Column():
                if assistant_logo:
                    # onerror hides the <img> if the LOGO URL is broken.
                    gr.HTML(f'<img id="logo-img" src="{assistant_logo}" alt="Assistant Logo" onerror="this.style.display=\'none\';">')
                gr.Markdown(f"<h1 style='text-align: center; color: #003366;'>{assistant_title}</h1>")
                gr.Markdown("<h3 style='text-align: center; color: #003366;'>Please enter your credentials to continue.</h3>")
                username_input = gr.Textbox(label="Username", placeholder="Enter username...")
                password_input = gr.Textbox(label="Password", type="password", placeholder="Enter password...")
                login_button = gr.Button("Login", variant="primary")
                # Inline slot for the "invalid credentials" message.
                error_message = gr.Markdown()

        # --- Main chat app (hidden until login succeeds) ---
        with gr.Row(visible=False) as main_app:
            with gr.Column(elem_classes="container"):
                if assistant_logo:
                    gr.HTML(f'<img id="logo-img" src="{assistant_logo}" alt="Assistant Logo" onerror="this.style.display=\'none\';">')
                gr.Markdown(f"# {assistant_title}", elem_classes="title")

                # (None, avatar) = default user icon, custom assistant avatar.
                chatbot = gr.Chatbot(height=500, avatar_images=(None, assistant_avatar))

                msg = gr.Textbox(placeholder="Type your message here...", container=False, scale=7)

                with gr.Row(elem_classes="button-row"):
                    submit = gr.Button("Submit", variant="primary")
                    # ClearButton resets both the textbox and the chat history.
                    clear = gr.ClearButton([msg, chatbot], value="Clear", variant="secondary")
                    undo = gr.Button("Delete Previous", variant="secondary")
                    logout_button = gr.Button("Logout", variant="secondary")

                gr.HTML('<div id="disclaimer-footer">You are chatting with an AI assistant. Make sure to evaluate the accuracy of its answers.</div>')

        def login(entered_username, entered_password):
            # Swap screen visibility on success; show an inline error otherwise.
            # NOTE(review): plain == comparison of credentials — consider
            # hmac.compare_digest to avoid timing side channels; confirm
            # threat model before changing.
            if entered_username == username and entered_password == password:
                return (
                    gr.update(visible=False),
                    gr.update(visible=True),
                    ""
                )
            else:
                return (
                    gr.update(visible=True),
                    gr.update(visible=False),
                    "<p style='color: red; text-align: center;'>Invalid credentials. Please try again.</p>"
                )

        def logout():
            # Back to the login screen; also clears error text, chat history
            # ([] -> chatbot) and the message box ("" -> msg).
            return (
                gr.update(visible=True),
                gr.update(visible=False),
                "",
                [],
                ""
            )

        def user(user_message, history):
            # Stage the user's turn: empty the textbox and append a
            # [user, pending-assistant] pair to the history.
            return "", history + [[user_message, None]]

        def bot(history):
            # Fill in the pending assistant slot of the last turn.
            if history and history[-1][0]:
                bot_message = chat_with_assistant(history[-1][0], history[:-1])
                history[-1][1] = bot_message
            return history

        def delete_previous(history):
            # Drop the most recent (user, assistant) exchange, if any.
            if len(history) > 0:
                return history[:-1]
            return history

        # Login via button click or Enter in the password field.
        login_button.click(
            login,
            inputs=[username_input, password_input],
            outputs=[login_screen, main_app, error_message]
        )

        password_input.submit(
            login,
            inputs=[username_input, password_input],
            outputs=[login_screen, main_app, error_message]
        )

        logout_button.click(
            logout,
            outputs=[login_screen, main_app, error_message, chatbot, msg]
        )

        # Two-step send: `user` echoes the message immediately (queue=False),
        # then `bot` generates the reply.
        msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
            bot, chatbot, chatbot
        )
        submit.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(
            bot, chatbot, chatbot
        )
        undo.click(delete_previous, chatbot, chatbot)

        demo.launch()