import gradio as gr
from huggingface_hub import InferenceClient
import PyPDF2
###############################################################################
# Hugging Face Chat Code #
###############################################################################
"""
For more information on Hugging Face Inference API support, please check:
https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
# Initialize your Hugging Face model client.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
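# Note (an assumption, not required by this app): gated or rate-limited models
# accept an access token via the `token` parameter, e.g.
#   InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=os.environ.get("HF_TOKEN"))
# The public Zephyr endpoint above works without one.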
def respond(message, history: list[dict], system_message, max_tokens, temperature, top_p, file_content):
"""
Calls the model (in non-streaming mode) to get a complete response.
The file content is appended to the system message as context.
Expects conversation history in the format:
[{"role": "user", "content": ...}, {"role": "assistant", "content": ...}, ...]
"""
if file_content and file_content.strip():
system_message += "\n\nFile content:\n" + file_content
# Build messages list for the API request.
messages = [{"role": "system", "content": system_message}]
    messages.extend(history)
messages.append({"role": "user", "content": message})
try:
completion = client.chat_completion(
messages,
max_tokens=max_tokens,
            stream=False,  # Non-streaming mode for simplicity.
temperature=temperature,
top_p=top_p,
)
response = completion.choices[0].message["content"]
except Exception as e:
response = f"Error during model response: {e}"
return response
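# Usage sketch (hypothetical values, not part of the app flow):
# respond("Summarize the file.", [], "You are a helpful AI assistant.",
#         max_tokens=256, temperature=0.7, top_p=0.95, file_content="...")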
###############################################################################
# File Upload & Parsing Functions #
###############################################################################
def parse_file(file_obj):
"""
Parses an uploaded file.
Supports PDF (using PyPDF2) and text files (UTF-8 decoding).
"""
file_extension = file_obj.name.split('.')[-1].lower()
if file_extension == "pdf":
try:
reader = PyPDF2.PdfReader(file_obj)
text = ""
for page in reader.pages:
text += (page.extract_text() or "") + "\n"
return text
except Exception as e:
return f"Error reading PDF: {e}"
else:
try:
return file_obj.read().decode("utf-8", errors="ignore")
except Exception as e:
return f"Error reading file: {e}"
def load_files(files):
"""
Processes a list of uploaded files (provided as file paths).
Opens each file, parses its content, and concatenates the text.
"""
    all_text = ""
    # Gradio passes None when the upload is cleared; return empty context.
    if not files:
        return all_text
    for file_path in files:
try:
with open(file_path, "rb") as f:
content = parse_file(f)
all_text += content + "\n"
except Exception as e:
all_text += f"Error processing file {file_path}: {e}\n"
return all_text
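# Usage sketch (file paths are hypothetical):
# context = load_files(["notes.txt", "paper.pdf"])  # -> concatenated text of both files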
###############################################################################
# Gradio UI Layout #
###############################################################################
with gr.Blocks() as demo:
gr.Markdown("# **Combined Chat & File Upload App**")
gr.Markdown(
"""
        This app lets you upload files (e.g., PDF or TXT) and chat with an AI assistant that uses the uploaded content as context throughout the conversation.
- **Upload File(s):** The file contents are automatically parsed and stored.
- **Chat:** Your message, along with the uploaded file content, is sent to the AI on every prompt.
"""
)
# State to hold the concatenated file content and conversation history.
file_content_state = gr.State("")
chat_history_state = gr.State([]) # List of dictionaries in the form {"role": "user"/"assistant", "content": ...}
# --- File Upload Section ---
# Use type="filepath" so that we get file paths for processing.
file_input = gr.File(label="Upload File(s)", file_count="multiple", type="filepath")
# Automatically process files upon upload.
file_input.change(fn=load_files, inputs=file_input, outputs=file_content_state)
gr.Markdown("## Chat")
chatbot = gr.Chatbot(label="Chat History", type="messages")
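    # With type="messages", the Chatbot expects OpenAI-style
    # {"role": ..., "content": ...} dicts, the same format kept in
    # chat_history_state, so the history can be rendered directly.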
user_input = gr.Textbox(label="Your Message", placeholder="Type your message here...", lines=2)
# Additional model parameters (adjustable)
system_prompt = gr.Textbox(label="System Message", value="You are a helpful AI assistant.", interactive=True)
max_tokens_slider = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max New Tokens")
temperature_slider = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
top_p_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)")
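    # Rough intuition: lower temperature makes sampling more deterministic,
    # while top_p caps nucleus sampling to the smallest token set whose
    # cumulative probability exceeds p.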
    def chat_fn(user_msg, history, file_content, system_msg, max_tokens, temperature, top_p):
        if not user_msg.strip():
            return "", history, history
        # Get the AI's response first: respond() appends the new user message
        # itself, so passing the prior history avoids sending it twice.
        response = respond(user_msg, history, system_msg, max_tokens, temperature, top_p, file_content)
        # Record both turns in the conversation history.
        history.append({"role": "user", "content": user_msg})
        history.append({"role": "assistant", "content": response})
        # Clear the textbox and update both the chatbot display and the stored state.
        return "", history, history
# Trigger sending message on Enter in the textbox.
user_input.submit(
fn=chat_fn,
inputs=[user_input, chat_history_state, file_content_state, system_prompt, max_tokens_slider, temperature_slider, top_p_slider],
        outputs=[user_input, chatbot, chat_history_state],
queue=True
)
# Also add a "Send" button.
send_button = gr.Button("Send")
send_button.click(
fn=chat_fn,
inputs=[user_input, chat_history_state, file_content_state, system_prompt, max_tokens_slider, temperature_slider, top_p_slider],
        outputs=[user_input, chatbot, chat_history_state],
queue=True
)
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)