NavyDevilDoc committed on
Commit
a5102e9
·
verified ·
1 Parent(s): f60b86c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +102 -48
app.py CHANGED
@@ -20,7 +20,10 @@ with st.sidebar:
20
  st.divider()
21
 
22
  # 2. FILE UPLOADER (The new feature)
23
- uploaded_file = st.file_uploader("Upload Context", type=['pdf', 'docx', 'txt', 'csv', 'xlsx'])
 
 
 
24
 
25
  if st.button("Clear Chat"):
26
  st.session_state.messages = []
@@ -32,72 +35,123 @@ st.title("⚓ Executive Editor Pro")
32
  if "messages" not in st.session_state:
33
  st.session_state.messages = []
34
 
35
- # --- LOGIC: HANDLE FILE UPLOAD ---
36
- # If a file is uploaded, we automatically extract it and put it in the chat as "User Context"
37
- if uploaded_file and "file_processed" not in st.session_state:
38
- with st.spinner("Reading document..."):
39
- file_text = file_processing.extract_text_from_file(uploaded_file)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40
 
41
- # We add a hidden system note or a user message with the file content
42
- file_message = f"**[SYSTEM: Attached File Content from {uploaded_file.name}]**\n\n{file_text}"
43
 
44
- # Append to history so the LLM sees it
45
- st.session_state.messages.append({"role": "user", "content": file_message})
46
- st.session_state.file_processed = True # Flag to prevent re-reading
47
- st.success(f"Loaded {uploaded_file.name}")
48
 
49
  # --- DISPLAY CHAT ---
50
  for msg in st.session_state.messages:
51
  with st.chat_message(msg["role"]):
52
- st.markdown(msg["content"])
 
 
53
 
54
- # --- CHAT INPUT ---
55
  if user_input := st.chat_input("Type instructions or paste text..."):
56
 
57
- # User step
58
  st.session_state.messages.append({"role": "user", "content": user_input})
59
  with st.chat_message("user"):
60
  st.markdown(user_input)
61
 
62
- # AI step
63
  with st.chat_message("assistant"):
64
  message_placeholder = st.empty()
65
  full_response = ""
66
 
67
- # Construct Prompt: Base + Specific Mode
68
- config = prompts.MODE_CONFIG[selected_mode]
69
- system_message = f"""
70
- {prompts.BASE_SYSTEM_PROMPT}
71
-
72
- CURRENT MODE: {selected_mode}
73
- INSTRUCTION: {config['instruction']}
74
-
75
- EXAMPLES OF DESIRED OUTPUT:
76
- {config['examples']}
77
- """
78
-
79
- messages_payload = [{"role": "system", "content": system_instruction}] + st.session_state.messages
80
 
81
- stream = client.chat.completions.create(
82
- model="gpt-4o",
83
- messages=messages_payload,
84
- temperature=0.3,
85
- stream=True
86
- )
 
 
 
 
 
 
 
 
 
 
87
 
88
- for chunk in stream:
89
- if chunk.choices[0].delta.content:
90
- full_response += chunk.choices[0].delta.content
91
- message_placeholder.markdown(full_response + "▌")
92
 
93
- message_placeholder.markdown(full_response)
94
- st.session_state.messages.append({"role": "assistant", "content": full_response})
 
 
 
 
 
 
 
 
 
 
 
 
 
95
 
96
- # --- 3. DOWNLOAD BUTTON (The new feature) ---
97
- # We place this immediately after the generation
98
- st.download_button(
99
- label="💾 Download Result",
100
- data=full_response,
101
- file_name="edited_text.md",
102
- mime="text/markdown"
 
 
 
103
  )
 
20
  st.divider()
21
 
22
  # 2. FILE UPLOADER (The new feature)
23
+ uploaded_files = st.file_uploader(
24
+ "Upload Context",
25
+ type=['pdf', 'docx', 'txt', 'csv', 'xlsx'],
26
+ accept_multiple_files=True)
27
 
28
  if st.button("Clear Chat"):
29
  st.session_state.messages = []
 
35
  if "messages" not in st.session_state:
36
  st.session_state.messages = []
37
 
38
+ # --- MAIN LOGIC: HANDLE BATCH UPLOADS ---
39
+ if uploaded_files and "file_processed" not in st.session_state:
40
+ st.session_state.file_processed = False
41
+
42
+ # We check if files are uploaded AND if we haven't processed this specific batch yet
43
+ if uploaded_files and not st.session_state.file_processed:
44
+
45
+ master_text = ""
46
+ file_names = []
47
+ error_log = []
48
+
49
+ with st.spinner(f"Processing {len(uploaded_files)} documents..."):
50
+ for file in uploaded_files:
51
+ text, error = file_processing.validate_and_extract(file)
52
+
53
+ if error:
54
+ error_log.append(f"❌ {file.name}: {error}")
55
+ else:
56
+ # We wrap each file in XML tags so the AI knows where one ends and the next begins
57
+ master_text += f"\n<document name='{file.name}'>\n{text}\n</document>\n"
58
+ file_names.append(file.name)
59
+
60
+ # Check if we successfully extracted anything
61
+ if master_text:
62
+ # Create the System Note
63
+ system_note = f"**[SYSTEM: User attached {len(file_names)} files]**\n\n{master_text}"
64
+
65
+ # Create the UI Message (Hidden context, Clean display)
66
+ display_message = f"📂 **Batch Upload Processed:**\n"
67
+ for name in file_names:
68
+ display_message += f"- `{name}`\n"
69
+
70
+ if error_log:
71
+ display_message += "\n**Errors:**\n" + "\n".join(error_log)
72
+
73
+ st.session_state.messages.append({
74
+ "role": "user",
75
+ "content": system_note,
76
+ "display_text": display_message
77
+ })
78
 
79
+ st.session_state.file_processed = True
80
+ st.rerun()
81
 
82
+ elif error_log:
83
+ # If everything failed, just show errors
84
+ for e in error_log:
85
+ st.error(e)
86
 
87
  # --- DISPLAY CHAT ---
88
  for msg in st.session_state.messages:
89
  with st.chat_message(msg["role"]):
90
+ # Check if there is specific 'display_text' (for files), otherwise use standard 'content'
91
+ display_content = msg.get("display_text", msg["content"])
92
+ st.markdown(display_content)
93
 
94
+ # --- CHAT INPUT & PROCESSING ---
95
  if user_input := st.chat_input("Type instructions or paste text..."):
96
 
97
+ # 1. User Step
98
  st.session_state.messages.append({"role": "user", "content": user_input})
99
  with st.chat_message("user"):
100
  st.markdown(user_input)
101
 
102
+ # 2. AI Step
103
  with st.chat_message("assistant"):
104
  message_placeholder = st.empty()
105
  full_response = ""
106
 
107
+ # --- FIX FOR NAME ERROR IS HERE ---
108
+ # We explicitly grab the config dictionary for the selected mode
109
+ mode_config = prompts.MODE_CONFIG[selected_mode]
 
 
 
 
 
 
 
 
 
 
110
 
111
+ # We construct the variable 'system_instruction' explicitly
112
+ system_instruction = f"""
113
+ {prompts.BASE_SYSTEM_PROMPT}
114
+
115
+ CURRENT MODE: {selected_mode}
116
+ INSTRUCTION: {mode_config['instruction']}
117
+
118
+ EXAMPLES OF DESIRED OUTPUT:
119
+ {mode_config['examples']}
120
+ """
121
+ # ----------------------------------
122
+
123
+ # Build the payload
124
+ # Note: We filter the messages to only send 'role' and 'content' to OpenAI
125
+ # (OpenAI will crash if we send our custom 'display_text' field)
126
+ api_messages = [{"role": "system", "content": system_instruction}]
127
 
128
+ for msg in st.session_state.messages:
129
+ api_messages.append({"role": msg["role"], "content": msg["content"]})
 
 
130
 
131
+ try:
132
+ stream = client.chat.completions.create(
133
+ model="gpt-4o",
134
+ messages=api_messages,
135
+ temperature=0.3,
136
+ stream=True
137
+ )
138
+
139
+ for chunk in stream:
140
+ if chunk.choices[0].delta.content:
141
+ full_response += chunk.choices[0].delta.content
142
+ message_placeholder.markdown(full_response + "▌")
143
+
144
+ message_placeholder.markdown(full_response)
145
+ st.session_state.messages.append({"role": "assistant", "content": full_response})
146
 
147
+ # Download Button
148
+ st.download_button(
149
+ label="💾 Download Result",
150
+ data=full_response,
151
+ file_name="edited_text.md",
152
+ mime="text/markdown"
153
+ )
154
+
155
+ except Exception as e:
156
+ st.error(f"An error occurred: {e}")
157
  )