NavyDevilDoc committed on
Commit
fedd979
·
verified ·
1 Parent(s): 46dcfa5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +64 -78
app.py CHANGED
@@ -2,109 +2,95 @@ import streamlit as st
2
  from openai import OpenAI
3
  import os
4
 
5
- # 1. Configuration
6
- st.set_page_config(page_title="Executive Editor", layout="wide", page_icon="⚓")
 
7
 
8
- # API Key Logic (Same as before)
9
- api_key = os.environ.get("OPENAI_API_KEY")
10
- if not api_key:
11
- try:
12
- api_key = st.secrets["OPENAI_API_KEY"]
13
- except (FileNotFoundError, KeyError):
14
- st.error("OpenAI API Key not found.")
15
- st.stop()
16
 
 
 
17
  client = OpenAI(api_key=api_key)
18
 
19
- # 2. Sidebar Controls (The "Executive Dashboard")
20
  with st.sidebar:
21
- st.header("🖊Editing Parameters")
22
 
23
- # Mode Selection
24
- edit_mode = st.radio(
25
- "Optimization Goal",
26
- [
27
- "Standard Polish (Grammar & Flow)",
28
- "BLUF (Bottom Line Up Front)",
29
- "Diplomatic / De-escalate",
30
- "Executive Summary (Concise)",
31
- "Technical Precision"
32
- ]
33
- )
34
-
35
- # Output Format
36
- format_mode = st.selectbox("Output Format", ["Plain Text", "Email Structure", "Memo Format", "Bullet Points"])
37
 
38
  st.divider()
39
 
40
- if st.button("🗑️ Clear Context", use_container_width=True):
 
 
 
41
  st.session_state.messages = []
42
  st.rerun()
43
 
44
- # 3. The "Command" System Prompt
45
- # Note the lower temperature and specific instructions for professional contexts.
46
- SYSTEM_PROMPT = f"""
47
- You are an expert Executive Editor and Technical Writer.
48
- Your goal is to rewrite the user's input to be clear, concise, and professional.
49
-
50
- CURRENT SETTINGS:
51
- - **Goal:** {edit_mode}
52
- - **Format:** {format_mode}
53
-
54
- OPERATIONAL RULES:
55
- 1. **Direct Action:** Do not explain your changes unless asked. Just provide the rewritten text.
56
- 2. **Preserve Intent:** Never change the core meaning or facts of the input.
57
- 3. **No Fluff:** Remove adjectives and adverbs that do not add value.
58
- 4. **BLUF:** If the mode is "BLUF", ensure the main request or conclusion is the very first sentence.
59
- 5. **Diplomacy:** If the mode is "Diplomatic", strip out aggression/emotion and replace with professional firmness.
60
-
61
- Start every response immediately with the rewritten text.
62
- """
63
 
64
- # 4. Session State
65
  if "messages" not in st.session_state:
66
  st.session_state.messages = []
67
 
68
- # 5. Main Interface
69
- st.title("⚓ Executive Editor")
70
- st.markdown(f"**Current Mode:** `{edit_mode}`")
 
 
 
 
 
 
 
 
 
 
71
 
72
- # Display History
73
- for message in st.session_state.messages:
74
- with st.chat_message(message["role"]):
75
- st.markdown(message["content"])
76
 
77
- # Input Handling
78
- if user_input := st.chat_input("Paste text to edit, or type instructions..."):
79
 
80
- # User Message
81
  st.session_state.messages.append({"role": "user", "content": user_input})
82
  with st.chat_message("user"):
83
  st.markdown(user_input)
84
 
85
- # AI Response
86
  with st.chat_message("assistant"):
87
  message_placeholder = st.empty()
88
  full_response = ""
89
 
90
- # We re-inject the system prompt every turn to capture Sidebar changes
91
- messages_payload = [{"role": "system", "content": SYSTEM_PROMPT}] + st.session_state.messages
 
 
92
 
93
- try:
94
- response = client.chat.completions.create(
95
- model="gpt-4o",
96
- messages=messages_payload,
97
- temperature=0.3, # Low temp = High consistency/Precision
98
- stream=True
99
- )
100
-
101
- for chunk in response:
102
- if chunk.choices[0].delta.content is not None:
103
- full_response += chunk.choices[0].delta.content
104
- message_placeholder.markdown(full_response + "▌")
105
-
106
- message_placeholder.markdown(full_response)
107
- st.session_state.messages.append({"role": "assistant", "content": full_response})
108
-
109
- except Exception as e:
110
- st.error(f"Error: {e}")
 
 
 
 
 
 
2
  from openai import OpenAI
3
  import os
4
 
5
+ # Import our new modules
6
+ import prompts
7
+ import file_processing
8
 
9
+ st.set_page_config(page_title="Executive Editor Pro", layout="wide", page_icon="⚓")
 
 
 
 
 
 
 
10
 
11
+ # ... (API Key Logic remains the same) ...
12
+ api_key = os.environ.get("OPENAI_API_KEY")
13
  client = OpenAI(api_key=api_key)
14
 
15
+ # --- SIDEBAR ---
16
  with st.sidebar:
17
+ st.header("Controls")
18
 
19
+ # 1. Mode Selection (Pulled from prompts.py)
20
+ selected_mode = st.radio("Editing Mode", list(prompts.MODE_INSTRUCTIONS.keys()))
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
  st.divider()
23
 
24
+ # 2. FILE UPLOADER (The new feature)
25
+ uploaded_file = st.file_uploader("Upload Context", type=['pdf', 'docx', 'txt', 'csv', 'xlsx'])
26
+
27
+ if st.button("Clear Chat"):
28
  st.session_state.messages = []
29
  st.rerun()
30
 
31
+ # --- MAIN APP ---
32
+ st.title("⚓ Executive Editor Pro")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
 
 
34
  if "messages" not in st.session_state:
35
  st.session_state.messages = []
36
 
37
+ # --- LOGIC: HANDLE FILE UPLOAD ---
38
+ # If a file is uploaded, we automatically extract it and put it in the chat as "User Context"
39
+ if uploaded_file and "file_processed" not in st.session_state:
40
+ with st.spinner("Reading document..."):
41
+ file_text = file_processing.extract_text_from_file(uploaded_file)
42
+
43
+ # We add a hidden system note or a user message with the file content
44
+ file_message = f"**[SYSTEM: Attached File Content from {uploaded_file.name}]**\n\n{file_text}"
45
+
46
+ # Append to history so the LLM sees it
47
+ st.session_state.messages.append({"role": "user", "content": file_message})
48
+ st.session_state.file_processed = True # Flag to prevent re-reading
49
+ st.success(f"Loaded {uploaded_file.name}")
50
 
51
+ # --- DISPLAY CHAT ---
52
+ for msg in st.session_state.messages:
53
+ with st.chat_message(msg["role"]):
54
+ st.markdown(msg["content"])
55
 
56
+ # --- CHAT INPUT ---
57
+ if user_input := st.chat_input("Type instructions or paste text..."):
58
 
59
+ # User step
60
  st.session_state.messages.append({"role": "user", "content": user_input})
61
  with st.chat_message("user"):
62
  st.markdown(user_input)
63
 
64
+ # AI step
65
  with st.chat_message("assistant"):
66
  message_placeholder = st.empty()
67
  full_response = ""
68
 
69
+ # Construct Prompt: Base + Specific Mode
70
+ system_instruction = prompts.BASE_SYSTEM_PROMPT + "\n\nCURRENT MODE: " + prompts.MODE_INSTRUCTIONS[selected_mode]
71
+
72
+ messages_payload = [{"role": "system", "content": system_instruction}] + st.session_state.messages
73
 
74
+ stream = client.chat.completions.create(
75
+ model="gpt-4o",
76
+ messages=messages_payload,
77
+ temperature=0.3,
78
+ stream=True
79
+ )
80
+
81
+ for chunk in stream:
82
+ if chunk.choices[0].delta.content:
83
+ full_response += chunk.choices[0].delta.content
84
+ message_placeholder.markdown(full_response + "▌")
85
+
86
+ message_placeholder.markdown(full_response)
87
+ st.session_state.messages.append({"role": "assistant", "content": full_response})
88
+
89
+ # --- 3. DOWNLOAD BUTTON (The new feature) ---
90
+ # We place this immediately after the generation
91
+ st.download_button(
92
+ label="💾 Download Result",
93
+ data=full_response,
94
+ file_name="edited_text.md",
95
+ mime="text/markdown"
96
+ )