Update app.py
app.py
CHANGED
@@ -775,48 +775,47 @@ def process_example(topic):
| 775 | 
| 776 | def process_input(prompt: str, uploaded_files):
| 777 |     # Add user's message
| 778 | -   if not any(m["role"] == "user" and m["content"] == prompt
| 779 |         st.session_state.messages.append({"role": "user", "content": prompt})
| 780 | 
| 781 |     with st.chat_message("user"):
| 782 |         st.markdown(prompt)
| 783 | -
| 784 |     with st.chat_message("assistant"):
| 785 |         placeholder = st.empty()
| 786 |         message_placeholder = st.empty()
| 787 |         full_response = ""
| 788 | 
| 789 | -       use_web_search
| 790 | -
| 791 | -
| 792 |         try:
| 793 | -           status = st.status("Preparing to generate ideas
| 794 | -           status.update(label="Initializing
| 795 | 
| 796 |             client = get_openai_client()
| 797 | -
| 798 | -           # Prepare system prompt
| 799 | 
| 800 |             selected_cat = st.session_state.get("category_focus", "(None)")
| 801 |             if selected_cat == "(None)":
| 802 |                 selected_cat = None
| 803 | -
| 804 |             sys_prompt = get_idea_system_prompt(selected_category=selected_cat)
| 805 | -
| 806 |             search_content = None
| 807 |             if use_web_search:
| 808 | -               status.update(label="
| 809 | -               with st.spinner("Searching
| 810 |                     search_content = do_web_search(keywords(prompt, top=5))
| 811 | 
| 812 | -           # File contents
| 813 |             file_content = None
| 814 | -           if
| 815 | -               status.update(label="
| 816 | -               with st.spinner("
| 817 |                     file_content = process_uploaded_files(uploaded_files)
| 818 | 
| 819 | -           #
| 820 |             user_content = prompt
| 821 |             if search_content:
| 822 |                 user_content += "\n\n" + search_content
@@ -825,108 +824,91 @@ def process_input(prompt: str, uploaded_files):
| 825 | 
| 826 |             api_messages = [
| 827 |                 {"role": "system", "content": sys_prompt},
| 828 | -               {"role": "user",
| 829 |             ]
| 830 | 
| 831 | -           # OpenAI
| 832 | -           status.update(label="Generating
| 833 | -
| 834 | -
| 835 | -
| 836 | -
| 837 | -
| 838 | -
| 839 | -
| 840 | -
| 841 | -
| 842 | -
| 843 | -               if (chunk.choices
| 844 | -                   and len(chunk.choices) > 0
| 845 |                     and chunk.choices[0].delta.content is not None):
| 846 | -
| 847 | -
| 848 | -
| 849 | -
| 850 | -
| 851 | -
| 852 | -       except Exception as api_error:
| 853 | -           error_message = str(api_error)
| 854 | -           logging.error(f"API error: {error_message}")
| 855 | -           status.update(label=f"Error: {error_message}", state="error")
| 856 | -           raise Exception(f"Idea generation error: {error_message}")
| 857 | -
| 858 | -       # Store final text
| 859 | -       answer_entry_saved = False
| 860 | -
| 861 | -       # If automatic image generation is enabled
| 862 |         if st.session_state.generate_image and full_response:
| 863 | -           # Split the answer into the 3 ideas and extract an image prompt for each idea
| 864 | -           # Simple approach: assume an `Image Prompt:` pattern can be found with a regex (instructed in the system prompt)
| 865 | -           # Or extract only once -> here we split in order to find each of the 3 ideas.
| 866 | -           # For now, look for the "English prompt for generating an image" part in the whole answer.
| 867 | -
| 868 | -           # Very simple parsing example (could be improved)
| 869 |             idea_sections = re.split(r"(## Idea \d+:)", full_response)
| 870 | -
| 871 | -
| 872 | -
| 873 | -
| 874 | -           for
| 875 | -
| 876 | -
| 877 | -
| 878 | -
| 879 | -
| 880 | -
| 881 | -           for idx, (title, text_block) in enumerate(pairs, start=1):
| 882 | -               # ① Form embedded in a table (markdown table) ──> | **Image Prompt** | prompt |
| 883 |                 table_match = re.search(
| 884 | -                   r"
| 885 | -
| 886 | -
| 887 | -
| 888 | -
| 889 | -
| 890 | -
| 891 | -
| 892 | -
| 893 | -
| 894 | -
| 895 | -
| 896 | -
| 897 | -
| 898 | -
| 899 | -
| 900 | -
| 901 | -
| 902 | -
| 903 | -
| 904 | -
| 905 | -
| 906 | -
| 907 | -
| 908 | -
| 909 | -
| 910 | -
| 911 | -
| 912 | 
| 913 | -
| 914 | -               # Remove unnecessary punctuation or line breaks at the end of the sentence
| 915 | -               raw_prompt = re.sub(r"[\r\n]+", " ", raw_prompt)
| 916 | -               raw_prompt = re.sub(r"[\"'`]", "", raw_prompt)
| 917 | -               # Generate the image
| 918 | -               with st.spinner(f"Generating image for {title}..."):
| 919 | -                   img, cap = generate_image(raw_prompt)
| 920 | -               if img:
| 921 | -                   st.image(img, caption=f"{title} - {cap}")
| 922 | -                   # Save to the conversation
| 923 | -                   st.session_state.messages.append({
| 924 | -                       "role": "assistant",
| 925 | -                       "content": "",
| 926 | -                       "image": img,
| 927 | -                       "image_caption": f"{title} - {cap}"
| 928 | -                   })
| 929 | -
| 930 |             # Save the final text after the 3-image generation process has finished
| 931 |             st.session_state.messages.append({"role": "assistant", "content": full_response})
| 932 |             answer_entry_saved = True
| 775 | 
| 776 | def process_input(prompt: str, uploaded_files):
| 777 |     # Add user's message
| 778 | +   if not any(m["role"] == "user" and m["content"] == prompt
| 779 | +              for m in st.session_state.messages):
| 780 |         st.session_state.messages.append({"role": "user", "content": prompt})
| 781 | 
| 782 |     with st.chat_message("user"):
| 783 |         st.markdown(prompt)
| 784 | +
| 785 |     with st.chat_message("assistant"):
| 786 |         placeholder = st.empty()
| 787 |         message_placeholder = st.empty()
| 788 |         full_response = ""
| 789 | 
| 790 | +       use_web_search = st.session_state.web_search_enabled
| 791 | +       has_uploaded = bool(uploaded_files)
| 792 | +
| 793 |         try:
| 794 | +           status = st.status("Preparing to generate ideas…")
| 795 | +           status.update(label="Initializing model…")
| 796 | 
| 797 |             client = get_openai_client()
| 798 | 
| 799 | +           # ── ① System prompt ──────────────────────────────
| 800 |             selected_cat = st.session_state.get("category_focus", "(None)")
| 801 |             if selected_cat == "(None)":
| 802 |                 selected_cat = None
| 803 |             sys_prompt = get_idea_system_prompt(selected_category=selected_cat)
| 804 | +
| 805 | +           # ── ② (Optional) web search & file contents ───────────────────
| 806 |             search_content = None
| 807 |             if use_web_search:
| 808 | +               status.update(label="Searching the web…")
| 809 | +               with st.spinner("Searching…"):
| 810 |                     search_content = do_web_search(keywords(prompt, top=5))
| 811 | 
| 812 |             file_content = None
| 813 | +           if has_uploaded:
| 814 | +               status.update(label="Reading uploaded files…")
| 815 | +               with st.spinner("Processing files…"):
| 816 |                     file_content = process_uploaded_files(uploaded_files)
| 817 | 
| 818 | +           # ── ③ Build the conversation messages ─────────────────────────
| 819 |             user_content = prompt
| 820 |             if search_content:
| 821 |                 user_content += "\n\n" + search_content
| 824 | 
| 825 |             api_messages = [
| 826 |                 {"role": "system", "content": sys_prompt},
| 827 | +               {"role": "user", "content": user_content},
| 828 |             ]
| 829 | 
| 830 | +           # ── ④ OpenAI streaming call ────────────────────────────
| 831 | +           status.update(label="Generating ideas…")
| 832 | +           stream = client.chat.completions.create(
| 833 | +               model = "gpt-4.1-mini",
| 834 | +               messages = api_messages,
| 835 | +               temperature = 1,
| 836 | +               max_tokens = MAX_TOKENS,
| 837 | +               top_p = 1,
| 838 | +               stream = True
| 839 | +           )
| 840 | +           for chunk in stream:
| 841 | +               if (chunk.choices
| 842 |                     and chunk.choices[0].delta.content is not None):
| 843 | +                   full_response += chunk.choices[0].delta.content
| 844 | +                   message_placeholder.markdown(full_response + "▌")
| 845 | +           message_placeholder.markdown(full_response)
| 846 | +           status.update(label="Ideas created!", state="complete")
| 847 | +
| 848 | +           # ── ⑤ Image generation ──────────────────────────────────
| 849 |             if st.session_state.generate_image and full_response:
| 850 |                 idea_sections = re.split(r"(## Idea \d+:)", full_response)
| 851 | +               pairs = [(idea_sections[i].strip(),
| 852 | +                         idea_sections[i+1].strip() if i+1 < len(idea_sections) else "")
| 853 | +                        for i in range(1, len(idea_sections), 2)]
| 854 | +
| 855 | +               for idx, (title, text_block) in enumerate(pairs, start=1):
| 856 | +                   # Table form: | **Image Prompt** | prompt |
| 857 | +                   table_match = re.search(
| 858 | +                       r"\|\s*\*\*Image\s+Prompt\*\*\s*\|\s*([^\n\|]+)",
| 859 | +                       text_block, flags=re.IGNORECASE)
| 860 | +                   # Fallback form: Image Prompt: …
| 861 | +                   if not table_match:
| 862 |                         table_match = re.search(
| 863 | +                           r"(?i)Image\s+Prompt\s*[:|-]\s*([^\n]+)", text_block)
| 864 | +
| 865 | +                   if not table_match:
| 866 | +                       continue
| 867 | +
| 868 | +                   raw_prompt = re.sub(r"[\r\n\|`'\"\\]", " ",
| 869 | +                                       table_match.group(1).strip())
| 870 | +
| 871 | +                   with st.spinner(f"Generating image for {title}…"):
| 872 | +                       img, cap = generate_image(raw_prompt)
| 873 | +
| 874 | +                   if img:
| 875 | +                       st.image(img, caption=f"{title} – {cap}")
| 876 | +                       st.session_state.messages.append({
| 877 | +                           "role": "assistant",
| 878 | +                           "content": "",
| 879 | +                           "image": img,
| 880 | +                           "image_caption": f"{title} – {cap}"
| 881 | +                       })
| 882 | +
| 883 | +           # ── ⑥ Save the result & downloads ──────────────────────
| 884 | +           st.session_state.messages.append(
| 885 | +               {"role": "assistant", "content": full_response})
| 886 | +
| 887 | +           st.subheader("Download This Output")
| 888 | +           c1, c2 = st.columns(2)
| 889 | +           c1.download_button("Markdown", full_response,
| 890 | +                              file_name=f"{prompt[:30]}.md",
| 891 | +                              mime="text/markdown")
| 892 | +           c2.download_button("HTML",
| 893 | +                              md_to_html(full_response, prompt[:30]),
| 894 | +                              file_name=f"{prompt[:30]}.html",
| 895 | +                              mime="text/html")
| 896 | +
| 897 | +           # ── ⑦ Auto save ─────────────────────────────────────────
| 898 | +           if st.session_state.auto_save:
| 899 | +               fn = f"chat_history_auto_{datetime.now():%Y%m%d_%H%M%S}.json"
| 900 | +               with open(fn, "w", encoding="utf-8") as fp:
| 901 | +                   json.dump(st.session_state.messages, fp,
| 902 | +                             ensure_ascii=False, indent=2)
| 903 | +
| 904 | +       except Exception as e:
| 905 | +           err = str(e)
| 906 | +           placeholder.error(f"Error: {err}")
| 907 | +           logging.error(err)
| 908 | +           st.session_state.messages.append(
| 909 | +               {"role": "assistant",
| 910 | +                "content": f"⚠️ An error occurred while processing: {err}"})
| 911 | 
| 912 |             # Save the final text after the 3-image generation process has finished
| 913 |             st.session_state.messages.append({"role": "assistant", "content": full_response})
| 914 |             answer_entry_saved = True
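
The main mechanism this change adopts is consuming the chat completion as a stream and accumulating it chunk by chunk before rendering. Below is a minimal, self-contained sketch of that pattern, assuming the OpenAI Python v1 client (openai>=1.0); the model name and prompt are placeholders, not values taken from the app.

# Minimal sketch of the streaming pattern used above (assumes openai>=1.0;
# model name and prompt are placeholders, not taken from the app).
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

stream = client.chat.completions.create(
    model="gpt-4.1-mini",
    messages=[{"role": "user", "content": "Suggest one product idea."}],
    stream=True,
)

full_response = ""
for chunk in stream:
    # Each chunk carries at most one incremental delta; skip empty chunks.
    if chunk.choices and chunk.choices[0].delta.content is not None:
        full_response += chunk.choices[0].delta.content

print(full_response)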
|