Spaces:
Sleeping
Sleeping
File size: 5,288 Bytes
import os

import streamlit as st
from openai import OpenAI

import prompts
import file_processing

st.set_page_config(page_title="Executive Editor Pro", layout="wide", page_icon="⚓")

# --- API KEY ---
# Fail fast with an actionable message: constructing/using the client with a
# missing key would otherwise only surface as a confusing auth error on the
# first chat request.
api_key = os.environ.get("OPENAI_API_KEY")
if not api_key:
    st.error("OPENAI_API_KEY environment variable is not set.")
    st.stop()
client = OpenAI(api_key=api_key)
# --- SIDEBAR ---
with st.sidebar:
    st.header("⚙️ Controls")

    # 1. Mode selection (modes are declared in prompts.py).
    selected_mode = st.radio("Editing Mode", list(prompts.MODE_CONFIG.keys()))
    st.divider()

    # 2. File uploader: batch context documents fed to the model.
    uploaded_files = st.file_uploader(
        "Upload Context",
        type=['pdf', 'docx', 'txt', 'csv', 'xlsx'],
        accept_multiple_files=True)

    if st.button("Clear Chat"):
        st.session_state.messages = []
        # BUG FIX: also reset the upload flag, otherwise re-uploading the
        # same files after clearing the chat is silently ignored.
        st.session_state.file_processed = False
        st.rerun()
# --- MAIN APP ---
st.title("⚓ Executive Editor Pro")

# Initialise the conversation history on the first run of the script.
st.session_state.setdefault("messages", [])
# --- MAIN LOGIC: HANDLE BATCH UPLOADS ---
if "file_processed" not in st.session_state:
    st.session_state.file_processed = False

# Signature of the current upload batch (None when nothing is uploaded).
current_batch = tuple(f.name for f in uploaded_files) if uploaded_files else None

# BUG FIX: the original check was only `not st.session_state.file_processed`,
# and the flag was never reset once set — so any *second* batch uploaded in
# the same session was silently ignored.  Comparing the batch signature lets
# a new set of files be processed while still skipping re-processing of the
# same batch on every rerun.
if uploaded_files and (
    not st.session_state.file_processed
    or st.session_state.get("processed_batch") != current_batch
):
    master_text = ""
    file_names = []
    error_log = []

    with st.spinner(f"Processing {len(uploaded_files)} documents..."):
        for file in uploaded_files:
            text, error = file_processing.validate_and_extract(file)
            if error:
                error_log.append(f"❌ {file.name}: {error}")
            else:
                # Wrap each file in XML tags so the AI knows where one
                # document ends and the next begins.
                master_text += f"\n<document name='{file.name}'>\n{text}\n</document>\n"
                file_names.append(file.name)

    # Check if we successfully extracted anything.
    if master_text:
        # Hidden context sent to the model...
        system_note = f"**[SYSTEM: User attached {len(file_names)} files]**\n\n{master_text}"

        # ...and a clean summary rendered in the UI.
        display_message = f"📂 **Batch Upload Processed:**\n"
        for name in file_names:
            display_message += f"- `{name}`\n"
        if error_log:
            display_message += "\n**Errors:**\n" + "\n".join(error_log)

        st.session_state.messages.append({
            "role": "user",
            "content": system_note,
            "display_text": display_message
        })
        st.session_state.file_processed = True
        st.session_state.processed_batch = current_batch
        st.rerun()
    elif error_log:
        # Every file failed extraction: just surface the errors.
        for e in error_log:
            st.error(e)
# --- DISPLAY CHAT ---
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        # File-upload messages carry a clean 'display_text'; every other
        # message falls back to its raw 'content'.
        st.markdown(msg.get("display_text", msg["content"]))
# --- CHAT INPUT & PROCESSING ---
if user_input := st.chat_input("Type instructions or paste text..."):
    # 1. Record and echo the user turn.
    st.session_state.messages.append({"role": "user", "content": user_input})
    with st.chat_message("user"):
        st.markdown(user_input)

    # 2. Stream the assistant turn.
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""

        # Build the system prompt for the currently selected mode.
        mode_config = prompts.MODE_CONFIG[selected_mode]
        system_instruction = f"""
{prompts.BASE_SYSTEM_PROMPT}
CURRENT MODE: {selected_mode}
INSTRUCTION: {mode_config['instruction']}
EXAMPLES OF DESIRED OUTPUT:
{mode_config['examples']}
"""

        # Build the payload.  Only 'role' and 'content' are forwarded:
        # the API rejects our custom 'display_text' field.
        api_messages = [{"role": "system", "content": system_instruction}]
        api_messages += [
            {"role": m["role"], "content": m["content"]}
            for m in st.session_state.messages
        ]

        try:
            stream = client.chat.completions.create(
                model="gpt-4o",
                messages=api_messages,
                temperature=0.3,
                stream=True
            )
            for chunk in stream:
                # BUG FIX: some stream events (e.g. a final usage chunk)
                # arrive with an empty 'choices' list; indexing it blindly
                # raised IndexError mid-stream.
                if chunk.choices and chunk.choices[0].delta.content:
                    full_response += chunk.choices[0].delta.content
                    message_placeholder.markdown(full_response + "▌")
            message_placeholder.markdown(full_response)
            st.session_state.messages.append(
                {"role": "assistant", "content": full_response})

            # Offer the finished result as a markdown download.
            st.download_button(
                label="💾 Download Result",
                data=full_response,
                file_name="edited_text.md",
                mime="text/markdown"
            )
        except Exception as e:
            st.error(f"An error occurred: {e}")