Update app.py
Browse files
app.py
CHANGED
|
@@ -1,484 +1,39 @@
|
|
| 1 |
-
|
| 2 |
-
from
|
| 3 |
-
from llama_index.llms.openai import OpenAI
|
| 4 |
-
from llama_index.core import Settings
|
| 5 |
import os
|
| 6 |
-
import
|
| 7 |
-
from
|
| 8 |
-
import json
|
| 9 |
-
from datetime import datetime
|
| 10 |
-
import hashlib
|
| 11 |
|
| 12 |
-
#
|
| 13 |
-
chat_engine = None
|
| 14 |
-
conversation_history = []
|
| 15 |
-
current_api_key_hash = None
|
| 16 |
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
return hashlib.sha256(api_key.encode()).hexdigest()
|
| 20 |
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
text += page.extract_text() + '\n'
|
| 27 |
-
return text
|
| 28 |
|
| 29 |
-
|
| 30 |
-
def read_docx(file_path):
    """Extract the plain text of a .docx file, one paragraph per line."""
    document = DocxDocument(file_path)
    # Join paragraph texts, terminating each with a newline (same output
    # as accumulating with ``+=`` paragraph by paragraph).
    return ''.join(paragraph.text + '\n' for paragraph in document.paragraphs)
|
| 36 |
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
global chat_engine, current_api_key_hash
|
| 40 |
-
|
| 41 |
-
if not files or not api_key:
|
| 42 |
-
return "Please provide both API key and files to proceed."
|
| 43 |
-
|
| 44 |
-
try:
|
| 45 |
-
docs = []
|
| 46 |
-
for file in files:
|
| 47 |
-
if file.name.endswith('.pdf'):
|
| 48 |
-
text = read_pdf(file.name)
|
| 49 |
-
docs.append(Document(text=text))
|
| 50 |
-
elif file.name.endswith('.docx'):
|
| 51 |
-
text = read_docx(file.name)
|
| 52 |
-
docs.append(Document(text=text))
|
| 53 |
-
|
| 54 |
-
# Set OpenAI API key
|
| 55 |
-
os.environ["OPENAI_API_KEY"] = api_key
|
| 56 |
-
|
| 57 |
-
# Store hashed API key
|
| 58 |
-
current_api_key_hash = hash_api_key(api_key)
|
| 59 |
-
|
| 60 |
-
Settings.llm = OpenAI(
|
| 61 |
-
model="gpt-3.5-turbo",
|
| 62 |
-
temperature=0.5,
|
| 63 |
-
api_key=api_key,
|
| 64 |
-
system_prompt="You are an expert on the Streamlit Python library and your job is to answer technical questions. Assume that all questions are related to the Streamlit Python library. Keep your answers technical and based on facts – do not hallucinate features."
|
| 65 |
-
)
|
| 66 |
-
|
| 67 |
-
index = VectorStoreIndex.from_documents(docs)
|
| 68 |
-
chat_engine = index.as_chat_engine(chat_mode="condense_question", verbose=True)
|
| 69 |
-
|
| 70 |
-
return "Documents loaded and indexed successfully! You can now start chatting."
|
| 71 |
-
except Exception as e:
|
| 72 |
-
return f"Error loading documents: {str(e)}"
|
| 73 |
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
if not api_key:
|
| 79 |
-
return history + [{"role": "assistant", "content": "Please enter your OpenAI API key first."}]
|
| 80 |
-
|
| 81 |
-
# Set current API key hash if not set
|
| 82 |
-
if current_api_key_hash is None:
|
| 83 |
-
current_api_key_hash = hash_api_key(api_key)
|
| 84 |
-
|
| 85 |
-
if chat_engine is None:
|
| 86 |
-
return history + [
|
| 87 |
-
{"role": "user", "content": message},
|
| 88 |
-
{"role": "assistant", "content": "Please upload and load documents first before asking questions."}
|
| 89 |
-
]
|
| 90 |
-
|
| 91 |
-
try:
|
| 92 |
-
response = chat_engine.chat(message)
|
| 93 |
-
conversation_history.append({"role": "user", "content": message})
|
| 94 |
-
conversation_history.append({"role": "assistant", "content": response.response})
|
| 95 |
-
|
| 96 |
-
return history + [
|
| 97 |
-
{"role": "user", "content": message},
|
| 98 |
-
{"role": "assistant", "content": response.response}
|
| 99 |
-
]
|
| 100 |
-
except Exception as e:
|
| 101 |
-
return history + [
|
| 102 |
-
{"role": "user", "content": message},
|
| 103 |
-
{"role": "assistant", "content": f"Error: {str(e)}"}
|
| 104 |
-
]
|
| 105 |
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
global conversation_history, current_api_key_hash
|
| 109 |
-
|
| 110 |
-
if not api_key:
|
| 111 |
-
return "Please enter your OpenAI API key first.", gr.update()
|
| 112 |
-
|
| 113 |
-
if not conversation_history:
|
| 114 |
-
return "No conversation to save.", gr.update()
|
| 115 |
-
|
| 116 |
-
try:
|
| 117 |
-
# Set current API key hash if not set
|
| 118 |
-
if current_api_key_hash is None:
|
| 119 |
-
current_api_key_hash = hash_api_key(api_key)
|
| 120 |
-
|
| 121 |
-
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
| 122 |
-
with open("conversations.json", "a") as f:
|
| 123 |
-
conv_data = {
|
| 124 |
-
"timestamp": timestamp,
|
| 125 |
-
"api_key_hash": current_api_key_hash,
|
| 126 |
-
"messages": conversation_history
|
| 127 |
-
}
|
| 128 |
-
json.dump(conv_data, f)
|
| 129 |
-
f.write("\n")
|
| 130 |
-
|
| 131 |
-
# Update the dropdown choices
|
| 132 |
-
choices = get_conversation_choices(api_key)
|
| 133 |
-
return "Conversation saved successfully!", gr.update(choices=choices)
|
| 134 |
-
except Exception as e:
|
| 135 |
-
return f"Error saving conversation: {str(e)}", gr.update()
|
| 136 |
|
| 137 |
-
|
| 138 |
-
def get_conversation_choices(api_key):
    """Build dropdown labels ("<n>. <timestamp>") for the caller's saved conversations.

    Only entries in conversations.json whose stored ``api_key_hash`` matches
    the hash of *api_key* are listed. The number ``n`` is the entry's 1-based
    position in the file, so the view/delete handlers can locate the same
    entry again by parsing the label.

    Returns an empty list when no key is given, the file is missing, or any
    read/parse error occurs (errors are deliberately swallowed so the UI
    dropdown simply shows nothing).
    """
    if not api_key:
        return []

    if os.path.exists("conversations.json"):
        try:
            api_key_hash = hash_api_key(api_key)
            # conversations.json is JSON-lines: one conversation object per line.
            with open("conversations.json", "r") as f:
                conversations = [json.loads(line) for line in f]

            choices = []
            for i, conv in enumerate(conversations):
                # Only show conversations for this API key
                if conv.get("api_key_hash") == api_key_hash:
                    timestamp = conv.get("timestamp", "Unknown time")
                    # Stored compact (YYYYmmdd_HHMMSS); re-render human-readable.
                    # NOTE(review): a missing timestamp yields the "Unknown time"
                    # default, which strptime cannot parse — the exception below
                    # then hides *all* choices; confirm that is acceptable.
                    formatted_time = datetime.strptime(timestamp, "%Y%m%d_%H%M%S").strftime("%Y-%m-%d %H:%M:%S")
                    choices.append(f"{i + 1}. {formatted_time}")

            return choices
        except Exception as e:
            # Best-effort: a corrupt file yields an empty dropdown, not a crash.
            return []
    return []
|
| 160 |
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
if os.path.exists("conversations.json"):
|
| 167 |
-
try:
|
| 168 |
-
api_key_hash = hash_api_key(api_key)
|
| 169 |
-
with open("conversations.json", "r") as f:
|
| 170 |
-
conversations = [json.loads(line) for line in f]
|
| 171 |
-
|
| 172 |
-
conv_text = ""
|
| 173 |
-
conv_count = 0
|
| 174 |
-
for i, conv in enumerate(conversations):
|
| 175 |
-
# Only show conversations for this API key
|
| 176 |
-
if conv.get("api_key_hash") == api_key_hash:
|
| 177 |
-
conv_count += 1
|
| 178 |
-
conv_text += f"\n{'='*50}\nConversation {i + 1}\n{'='*50}\n"
|
| 179 |
-
timestamp = conv.get("timestamp", "Unknown time")
|
| 180 |
-
formatted_time = datetime.strptime(timestamp, "%Y%m%d_%H%M%S").strftime("%Y-%m-%d %H:%M:%S")
|
| 181 |
-
conv_text += f"Timestamp: {formatted_time}\n\n"
|
| 182 |
-
|
| 183 |
-
messages = conv.get("messages", conv)
|
| 184 |
-
for message in messages:
|
| 185 |
-
role = message.get('role', 'unknown')
|
| 186 |
-
content = message.get('content', '')
|
| 187 |
-
conv_text += f"{role.upper()}: {content}\n\n"
|
| 188 |
-
|
| 189 |
-
if conv_count == 0:
|
| 190 |
-
return "No conversations found for your API key."
|
| 191 |
-
|
| 192 |
-
return conv_text
|
| 193 |
-
except Exception as e:
|
| 194 |
-
return f"Error loading conversations: {str(e)}"
|
| 195 |
-
return "No previous conversations found."
|
| 196 |
-
|
| 197 |
-
# Function to view selected conversation
|
| 198 |
-
# Function to view selected conversation
def view_selected_conversation(selection, api_key):
    """Render one saved conversation as display text.

    *selection* is a dropdown label of the form "<n>. <timestamp>" produced
    by get_conversation_choices; ``n`` is the entry's 1-based line position
    in conversations.json. Ownership is enforced by comparing the stored
    api_key_hash against the hash of *api_key*. All outcomes — including
    errors — are returned as user-facing strings for the textbox.
    """
    if not api_key:
        return "Please enter your OpenAI API key first."

    if not selection or selection == "Select a conversation":
        return "Please select a conversation to view."

    try:
        api_key_hash = hash_api_key(api_key)
        # Extract conversation number from selection
        conv_number = int(selection.split(".")[0]) - 1

        if os.path.exists("conversations.json"):
            # JSON-lines file: one conversation object per line.
            with open("conversations.json", "r") as f:
                conversations = [json.loads(line) for line in f]

            if conv_number < len(conversations):
                conv = conversations[conv_number]

                # Check if this conversation belongs to the current API key
                if conv.get("api_key_hash") != api_key_hash:
                    return "This conversation does not belong to your API key."

                conv_text = f"{'='*50}\n"
                timestamp = conv.get("timestamp", "Unknown time")
                formatted_time = datetime.strptime(timestamp, "%Y%m%d_%H%M%S").strftime("%Y-%m-%d %H:%M:%S")
                conv_text += f"Conversation Timestamp: {formatted_time}\n"
                conv_text += f"{'='*50}\n\n"

                # Fallback to the whole entry — presumably a legacy format where
                # the line itself was the message list; TODO confirm.
                messages = conv.get("messages", conv)
                for message in messages:
                    role = message.get('role', 'unknown')
                    content = message.get('content', '')
                    conv_text += f"{role.upper()}: {content}\n\n"

                return conv_text

        # Reached when the file is missing or the index is out of range.
        return "Conversation not found."
    except Exception as e:
        return f"Error viewing conversation: {str(e)}"
|
| 238 |
-
|
| 239 |
-
# Function to delete selected conversation
|
| 240 |
-
# Function to delete selected conversation
def delete_selected_conversation(selection, api_key):
    """Delete one saved conversation (owner-checked) and refresh the UI.

    Returns a 4-tuple matching the Gradio outputs wiring:
    (status message, dropdown update, all-conversations text, selected text).
    *selection* is a "<n>. <timestamp>" label; ``n`` is the entry's 1-based
    line position in conversations.json.
    """
    if not api_key:
        return "Please enter your OpenAI API key first.", gr.update(), "", ""

    if not selection or selection == "Select a conversation":
        return "Please select a conversation to delete.", gr.update(), "", ""

    try:
        api_key_hash = hash_api_key(api_key)
        # Extract conversation number from selection
        conv_number = int(selection.split(".")[0]) - 1

        if os.path.exists("conversations.json"):
            # JSON-lines file: one conversation object per line.
            with open("conversations.json", "r") as f:
                conversations = [json.loads(line) for line in f]

            if conv_number < len(conversations):
                conv = conversations[conv_number]

                # Check if this conversation belongs to the current API key
                if conv.get("api_key_hash") != api_key_hash:
                    return "You can only delete your own conversations.", gr.update(), "", ""

                # Remove the selected conversation
                del conversations[conv_number]

                # Write back the remaining conversations
                # NOTE(review): read-modify-rewrite with no locking — concurrent
                # users of the same file could lose each other's writes; confirm
                # single-process deployment.
                with open("conversations.json", "w") as f:
                    for conv in conversations:
                        json.dump(conv, f)
                        f.write("\n")

                # Update dropdown choices
                choices = get_conversation_choices(api_key)

                # Get updated all conversations display
                all_convs = load_conversations(api_key)

                return f"Conversation {conv_number + 1} deleted successfully!", gr.update(choices=choices, value="Select a conversation"), all_convs, ""
            else:
                return "Conversation not found.", gr.update(), "", ""

        return "No conversations file found.", gr.update(), "", ""
    except Exception as e:
        return f"Error deleting conversation: {str(e)}", gr.update(), "", ""
|
| 285 |
-
|
| 286 |
-
# Function to clear current conversation
|
| 287 |
-
# Function to clear current conversation
def clear_conversation():
    """Wipe the in-memory chat history and return a fresh, empty chat state."""
    global conversation_history
    # Rebind (rather than mutate) so any old references keep their contents.
    conversation_history = []
    return list()
|
| 291 |
-
|
| 292 |
-
# Function to delete all conversations for current API key
|
| 293 |
-
# Function to delete all conversations for current API key
def delete_all_conversations(api_key):
    """Delete every saved conversation owned by *api_key*'s hash.

    Conversations belonging to other API keys are preserved. Returns a
    4-tuple matching the Gradio outputs wiring:
    (status message, dropdown update, all-conversations text, selected text).
    """
    if not api_key:
        return "Please enter your OpenAI API key first.", gr.update(), "", ""

    try:
        api_key_hash = hash_api_key(api_key)

        if os.path.exists("conversations.json"):
            # JSON-lines file: one conversation object per line.
            with open("conversations.json", "r") as f:
                conversations = [json.loads(line) for line in f]

            # Keep only conversations that don't belong to this API key
            remaining_conversations = [conv for conv in conversations if conv.get("api_key_hash") != api_key_hash]

            deleted_count = len(conversations) - len(remaining_conversations)

            if deleted_count == 0:
                return "No conversations to delete for your API key.", gr.update(), "No previous conversations found.", ""

            # Write back remaining conversations
            with open("conversations.json", "w") as f:
                for conv in remaining_conversations:
                    json.dump(conv, f)
                    f.write("\n")

            # If no conversations left, remove the file
            if len(remaining_conversations) == 0:
                os.remove("conversations.json")

            return f"All {deleted_count} conversation(s) deleted successfully!", gr.update(choices=[], value="Select a conversation"), "No previous conversations found.", ""

        return "No conversations to delete.", gr.update(), "No previous conversations found.", ""
    except Exception as e:
        return f"Error deleting conversations: {str(e)}", gr.update(), "", ""
|
| 327 |
-
|
| 328 |
-
# Function to refresh dropdown
|
| 329 |
-
# Function to refresh dropdown
def refresh_dropdown(api_key):
    """Rebuild the conversation dropdown's choices for the given API key."""
    if api_key:
        return gr.update(choices=get_conversation_choices(api_key))
    # No key yet: present an empty dropdown.
    return gr.update(choices=[])
|
| 334 |
-
|
| 335 |
-
# Create Gradio interface
|
| 336 |
-
with gr.Blocks(title="Chat with Documents 💬 📚", theme=gr.themes.Ocean()) as demo:
|
| 337 |
-
gr.Markdown("# Chat with Documents 💬 📚")
|
| 338 |
-
gr.Markdown("Upload PDF or DOCX files and chat with them using AI!")
|
| 339 |
-
gr.Markdown("**Note:** Your conversations are private and tied to your API key. Only you can view and manage your conversations.")
|
| 340 |
-
|
| 341 |
-
with gr.Row():
|
| 342 |
-
with gr.Column(scale=2):
|
| 343 |
-
api_key_input = gr.Textbox(
|
| 344 |
-
label="OpenAI API Key",
|
| 345 |
-
type="password",
|
| 346 |
-
placeholder="Enter your OpenAI API key here..."
|
| 347 |
-
)
|
| 348 |
-
|
| 349 |
-
file_upload = gr.File(
|
| 350 |
-
label="Upload PDF or DOCX files",
|
| 351 |
-
file_count="multiple",
|
| 352 |
-
file_types=[".pdf", ".docx"]
|
| 353 |
-
)
|
| 354 |
-
|
| 355 |
-
load_btn = gr.Button("Load Documents", variant="primary")
|
| 356 |
-
load_status = gr.Textbox(label="Status", interactive=False)
|
| 357 |
-
|
| 358 |
-
load_btn.click(
|
| 359 |
-
fn=load_data,
|
| 360 |
-
inputs=[file_upload, api_key_input],
|
| 361 |
-
outputs=load_status
|
| 362 |
-
)
|
| 363 |
-
|
| 364 |
-
with gr.Row():
|
| 365 |
-
with gr.Column(scale=3):
|
| 366 |
-
chatbot = gr.Chatbot(
|
| 367 |
-
label="Chat",
|
| 368 |
-
height=400
|
| 369 |
-
)
|
| 370 |
-
msg = gr.Textbox(
|
| 371 |
-
label="Your Question",
|
| 372 |
-
placeholder="Ask a question about your documents..."
|
| 373 |
-
)
|
| 374 |
-
|
| 375 |
-
with gr.Row():
|
| 376 |
-
submit_btn = gr.Button("Send", variant="primary")
|
| 377 |
-
clear_btn = gr.Button("Clear Chat")
|
| 378 |
-
|
| 379 |
-
with gr.Row():
|
| 380 |
-
save_btn = gr.Button("Save Conversation")
|
| 381 |
-
save_status = gr.Textbox(label="Save Status", interactive=False)
|
| 382 |
-
|
| 383 |
-
with gr.Column(scale=1):
|
| 384 |
-
gr.Markdown("### Conversation Management")
|
| 385 |
-
gr.Markdown("_Only your conversations are shown below_")
|
| 386 |
-
|
| 387 |
-
with gr.Row():
|
| 388 |
-
load_convs_btn = gr.Button("Load All Conversations")
|
| 389 |
-
refresh_btn = gr.Button("🔄 Refresh List")
|
| 390 |
-
|
| 391 |
-
convs_display = gr.Textbox(
|
| 392 |
-
label="All Conversations",
|
| 393 |
-
lines=10,
|
| 394 |
-
interactive=False
|
| 395 |
-
)
|
| 396 |
-
|
| 397 |
-
gr.Markdown("### Select & Manage Conversation")
|
| 398 |
-
|
| 399 |
-
conversation_dropdown = gr.Dropdown(
|
| 400 |
-
label="Select Conversation",
|
| 401 |
-
choices=[],
|
| 402 |
-
value="Select a conversation",
|
| 403 |
-
interactive=True
|
| 404 |
-
)
|
| 405 |
-
|
| 406 |
-
view_btn = gr.Button("View Selected", variant="secondary")
|
| 407 |
-
|
| 408 |
-
with gr.Row():
|
| 409 |
-
with gr.Column(scale=1):
|
| 410 |
-
selected_conv_display = gr.Textbox(
|
| 411 |
-
label="Selected Conversation",
|
| 412 |
-
lines=10,
|
| 413 |
-
interactive=False
|
| 414 |
-
)
|
| 415 |
-
|
| 416 |
-
with gr.Row():
|
| 417 |
-
delete_selected_btn = gr.Button("Delete Selected", variant="stop")
|
| 418 |
-
delete_all_btn = gr.Button("Delete All Mine", variant="stop")
|
| 419 |
-
|
| 420 |
-
delete_status = gr.Textbox(label="Delete Status", interactive=False)
|
| 421 |
-
|
| 422 |
-
|
| 423 |
-
# Event handlers
|
| 424 |
-
submit_btn.click(
|
| 425 |
-
fn=chat_with_docs,
|
| 426 |
-
inputs=[msg, chatbot, api_key_input],
|
| 427 |
-
outputs=chatbot
|
| 428 |
-
).then(
|
| 429 |
-
lambda: "",
|
| 430 |
-
outputs=msg
|
| 431 |
-
)
|
| 432 |
-
|
| 433 |
-
msg.submit(
|
| 434 |
-
fn=chat_with_docs,
|
| 435 |
-
inputs=[msg, chatbot, api_key_input],
|
| 436 |
-
outputs=chatbot
|
| 437 |
-
).then(
|
| 438 |
-
lambda: "",
|
| 439 |
-
outputs=msg
|
| 440 |
-
)
|
| 441 |
-
|
| 442 |
-
clear_btn.click(
|
| 443 |
-
fn=clear_conversation,
|
| 444 |
-
outputs=chatbot
|
| 445 |
-
)
|
| 446 |
-
|
| 447 |
-
save_btn.click(
|
| 448 |
-
fn=save_conversation,
|
| 449 |
-
inputs=api_key_input,
|
| 450 |
-
outputs=[save_status, conversation_dropdown]
|
| 451 |
-
)
|
| 452 |
-
|
| 453 |
-
load_convs_btn.click(
|
| 454 |
-
fn=load_conversations,
|
| 455 |
-
inputs=api_key_input,
|
| 456 |
-
outputs=convs_display
|
| 457 |
-
)
|
| 458 |
-
|
| 459 |
-
refresh_btn.click(
|
| 460 |
-
fn=refresh_dropdown,
|
| 461 |
-
inputs=api_key_input,
|
| 462 |
-
outputs=conversation_dropdown
|
| 463 |
-
)
|
| 464 |
-
|
| 465 |
-
view_btn.click(
|
| 466 |
-
fn=view_selected_conversation,
|
| 467 |
-
inputs=[conversation_dropdown, api_key_input],
|
| 468 |
-
outputs=selected_conv_display
|
| 469 |
-
)
|
| 470 |
-
|
| 471 |
-
delete_selected_btn.click(
|
| 472 |
-
fn=delete_selected_conversation,
|
| 473 |
-
inputs=[conversation_dropdown, api_key_input],
|
| 474 |
-
outputs=[delete_status, conversation_dropdown, convs_display, selected_conv_display]
|
| 475 |
-
)
|
| 476 |
-
|
| 477 |
-
delete_all_btn.click(
|
| 478 |
-
fn=delete_all_conversations,
|
| 479 |
-
inputs=api_key_input,
|
| 480 |
-
outputs=[delete_status, conversation_dropdown, convs_display, selected_conv_display]
|
| 481 |
-
)
|
| 482 |
|
| 483 |
if __name__ == "__main__":
|
| 484 |
-
|
|
|
|
| 1 |
+
import os
import subprocess
import sys
import tempfile

from Crypto.Cipher import AES
from Crypto.Protocol.KDF import PBKDF2
from dotenv import load_dotenv
|
|
|
|
|
|
|
|
|
|
| 6 |
|
| 7 |
+
load_dotenv()  # Load environment variables (e.g. PASSWORD) from a local .env file, if present
|
|
|
|
|
|
|
|
|
|
| 8 |
|
| 9 |
+
def unpad(data):
    """Strip PKCS#7 padding from *data* and return the payload.

    The last byte of a PKCS#7-padded buffer gives the pad length, and every
    pad byte carries that same value.

    Raises:
        ValueError: if *data* is empty or the padding is malformed — which
            in practice means the decryption password was wrong or the
            ciphertext is corrupt. (The original ``data[:-data[-1]]`` would
            silently return ``b""`` for a trailing 0 byte, since
            ``data[:-0]`` is empty, and silently mangle data on any other
            bad pad value.)
    """
    if not data:
        raise ValueError("cannot unpad empty data")
    pad_len = data[-1]
    if pad_len == 0 or pad_len > len(data):
        raise ValueError("invalid PKCS#7 padding length")
    # All pad bytes must equal the pad length.
    if data[-pad_len:] != bytes([pad_len]) * pad_len:
        raise ValueError("invalid PKCS#7 padding bytes")
    return data[:-pad_len]
|
|
|
|
| 11 |
|
| 12 |
+
def decrypt_and_run():
    """Decrypt ``code.enc`` with the PASSWORD secret and execute the result.

    Expected file layout: 16-byte PBKDF2 salt || 16-byte AES-CBC IV ||
    ciphertext. A 32-byte key is derived with PBKDF2 (1,000,000 iterations)
    from the PASSWORD environment variable.

    Raises:
        ValueError: if the PASSWORD environment variable is missing (or, via
            unpad, if decryption produced malformed padding).

    SECURITY NOTE(review): this deliberately executes code hidden from static
    inspection. Anyone holding the PASSWORD secret and write access to
    ``code.enc`` gets arbitrary code execution — confirm that is the intended
    trust model for this Space.
    """
    # Get password from Hugging Face Secrets environment variable
    password = os.getenv("PASSWORD")
    if not password:
        raise ValueError("PASSWORD secret not found in environment variables")

    password = password.encode()

    with open("code.enc", "rb") as f:
        encrypted = f.read()

    # Split the container: salt (16) || IV (16) || ciphertext (rest).
    salt = encrypted[:16]
    iv = encrypted[16:32]
    ciphertext = encrypted[32:]

    key = PBKDF2(password, salt, dkLen=32, count=1000000)
    cipher = AES.new(key, AES.MODE_CBC, iv)

    plaintext = unpad(cipher.decrypt(ciphertext))

    # Write the decrypted script to a temp file and run it with the *current*
    # interpreter: a bare "python" (as os.system used) may be absent from
    # PATH or point at a different installation. subprocess.run with a list
    # also avoids the shell entirely. The plaintext is always removed from
    # disk afterwards instead of being left behind.
    tmp_path = None
    try:
        with tempfile.NamedTemporaryFile(suffix=".py", delete=False, mode='wb') as tmp:
            tmp.write(plaintext)
            tmp_path = tmp.name
        # File handle is closed before execution (required on Windows).
        print(f"[INFO] Running decrypted code from {tmp_path}")
        subprocess.run([sys.executable, tmp_path], check=False)
    finally:
        if tmp_path is not None:
            try:
                os.remove(tmp_path)
            except OSError:
                pass  # best-effort cleanup
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 37 |
|
| 38 |
if __name__ == "__main__":
    # Script entry point: decrypt and execute the hidden application code.
    decrypt_and_run()
|