Update app.py
app.py CHANGED
@@ -1,6 +1,5 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
-
 import nltk
 import PyPDF2
 
@@ -13,35 +12,40 @@ nltk.download("punkt", quiet=True)
 # Initialize the Hugging Face model client
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
-def …
+def respond_chunked(message, history, system_message, max_tokens, temperature, top_p, file_content):
     """
-    Calls the Hugging Face model for a response.
-    Includes file content in the system message for context.
+    Calls the Hugging Face model for a response with support for chunked file content.
     """
-    …
+    # Split file content into manageable chunks
+    chunks = chunk_text(file_content, max_chunk_size=1500)
+    combined_response = ""
+
+    # Process each chunk and append to the response
+    for chunk in chunks:
+        # Append chunk to system message for context
+        chunked_system_message = f"{system_message}\n\nFile Content Chunk:\n{chunk}"
+
+        # Prepare the message payload
+        messages = [{"role": "system", "content": chunked_system_message}]
+        for user, assistant in history:
+            if user:
+                messages.append({"role": "user", "content": user})
+            if assistant:
+                messages.append({"role": "assistant", "content": assistant})
+        messages.append({"role": "user", "content": message})
+
+        try:
+            completion = client.chat_completion(
+                messages,
+                max_tokens=max_tokens,
+                temperature=temperature,
+                top_p=top_p,
+            )
+            combined_response += completion.choices[0].message["content"] + "\n"
+        except Exception as e:
+            combined_response += f"Error processing chunk: {e}\n"
+
+    return combined_response.strip()
 
 ###############################################################################
 # File Upload & Parsing Functions #
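For reference, a minimal sketch of exercising the new respond_chunked directly, outside the Gradio UI. The prompt, sampling values, and sample text are illustrative assumptions, and the call assumes the InferenceClient above can reach the zephyr-7b-beta endpoint (with a valid HF token where required):

# Illustrative only: not part of this commit.
sample_file_text = (
    "First sentence of an uploaded document. "
    "Second sentence with more detail."
)
reply = respond_chunked(
    message="Summarize the uploaded file.",        # assumed prompt
    history=[],                                    # no prior chat turns
    system_message="You are a helpful assistant.",  # assumed system prompt
    max_tokens=512,
    temperature=0.7,
    top_p=0.95,
    file_content=sample_file_text,
)
print(reply)

Note that each chunk triggers its own chat_completion call, so a file that splits into N chunks costs N model calls, and the per-chunk replies are simply concatenated in order.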
@@ -79,16 +83,50 @@ def load_files(files):
             combined_text += f"Error processing file {file}: {e}\n"
     return combined_text
 
+###############################################################################
+# Chunking Function #
+###############################################################################
+
+def chunk_text(text, max_chunk_size=1500):
+    """
+    Splits text into chunks of up to `max_chunk_size` tokens (approximate).
+    """
+    from nltk.tokenize import sent_tokenize
+
+    sentences = sent_tokenize(text)
+    chunks = []
+    current_chunk = ""
+    current_tokens = 0
+
+    def approximate_token_count(text):
+        # Naive tokenization approximation
+        return len(text.split())
+
+    for sentence in sentences:
+        sentence_tokens = approximate_token_count(sentence)
+        if current_tokens + sentence_tokens <= max_chunk_size:
+            current_chunk += " " + sentence
+            current_tokens += sentence_tokens
+        else:
+            if current_chunk:
+                chunks.append(current_chunk.strip())
+            current_chunk = sentence
+            current_tokens = sentence_tokens
+    if current_chunk:
+        chunks.append(current_chunk.strip())
+
+    return chunks
+
 ###############################################################################
 # Gradio UI Layout #
 ###############################################################################
 
 with gr.Blocks() as demo:
-    gr.Markdown("# **Chat with File Context**")
+    gr.Markdown("# **Chat with File Context (Chunking for Large Files)**")
     gr.Markdown(
         """
-This app lets you upload file(s) and chat with an AI assistant.
-Uploaded file content will provide context for the conversation.
+This app lets you upload large file(s) and chat with an AI assistant.
+Uploaded file content will be processed in chunks to ensure smooth handling.
         """
     )
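A quick way to see the sentence-based chunker in action (a sketch; it assumes the punkt data downloaded at startup is available, and the sample text and chunk size are made up). With max_chunk_size=5, the word-count approximation forces a split at the sentence boundary:

# Illustrative only: not part of this commit.
text = "This is the first sentence. Here comes a second, slightly longer sentence."
print(chunk_text(text, max_chunk_size=5))
# -> ['This is the first sentence.', 'Here comes a second, slightly longer sentence.']

Since approximate_token_count is a whitespace word count rather than the model's tokenizer, max_chunk_size=1500 is a rough budget, not an exact token limit.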
@@ -111,14 +149,14 @@ Uploaded file content will provide context for the conversation.
         temperature = gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature")
         top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p")
 
-    # Chat Function
+    # Chat Function with Chunking
    def chat_function(user_message, history, file_content, system_prompt, max_tokens, temperature, top_p):
         if not user_message.strip():
             return "", history
         # Append user's message to the chat history
         history.append((user_message, ""))
-        # Get response from the model
-        assistant_response = …
+        # Get response from the model with chunking
+        assistant_response = respond_chunked(user_message, history, system_prompt, max_tokens, temperature, top_p, file_content)
         history[-1] = (user_message, assistant_response)
         return "", history
 
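The event wiring that feeds chat_function falls outside this diff. A hypothetical hookup consistent with the signature above could look like the following; the component names (msg, chatbot, file_content_state) are assumptions, not names taken from the Space:

# Hypothetical wiring sketch: not shown in this diff.
msg.submit(
    chat_function,
    inputs=[msg, chatbot, file_content_state, system_prompt, max_tokens, temperature, top_p],
    outputs=[msg, chatbot],
)

Because chat_function returns ("", history), submitting clears the textbox and re-renders the Chatbot with the new (user_message, assistant_response) pair.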