Update app.py

app.py CHANGED
@@ -19,7 +19,7 @@ BRAVE_ENDPOINT = "https://api.search.brave.com/res/v1/web/search"
 IMAGE_API_URL = "http://211.233.58.201:7896"  # image generation API
 MAX_TOKENS = 7999
 
-# ──────────────────────────────── Physical Transformation Categories (KR & EN)
+# ──────────────────────────────── Physical Transformation Categories (KR & EN) ────────────────
 physical_transformation_categories = {
     "센서 기능": [
         "시각 센서/감지", "청각 센서/감지", "촉각 센서/감지", "미각 센서/감지", "후각 센서/감지",
@@ -294,7 +294,6 @@ physical_transformation_categories_en = {
         "Data-driven decision making / AI adoption",
         "Convergence of new technologies / Innovative investments"
     ]
-
 }
 
 # ──────────────────────────────── Logging ────────────────────────────────
@@ -302,7 +301,6 @@ logging.basicConfig(level=logging.INFO,
                     format="%(asctime)s - %(levelname)s - %(message)s")
 
 # ──────────────────────────────── OpenAI Client ──────────────────────────
-
 @st.cache_resource
 def get_openai_client():
     """Create an OpenAI client with timeout and retry settings."""
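The body of `get_openai_client` falls outside this diff. A minimal sketch consistent with the docstring and the `@st.cache_resource` decorator, assuming the openai>=1.0 SDK (whose constructor does accept `timeout` and `max_retries`) and an `OPENAI_API_KEY` environment variable:

```python
# Sketch only: the real body of get_openai_client is outside this diff.
import os

import streamlit as st
from openai import OpenAI

@st.cache_resource  # one shared client per Streamlit server process
def get_openai_client():
    """Create an OpenAI client with timeout and retry settings."""
    return OpenAI(
        api_key=os.environ["OPENAI_API_KEY"],  # assumed to be set
        timeout=30.0,     # seconds per request
        max_retries=2,    # SDK-level automatic retries
    )
```

Caching the client as a resource avoids rebuilding the HTTP connection pool on every Streamlit rerun.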
@@ -326,8 +324,6 @@ def get_idea_system_prompt(selected_category: str | None = None) -> str:
         f'이 카테고리의 항목들을 2단계와 3단계 모두에서 우선적으로 고려하십시오.\n'
     ) if selected_category else ""
 
-
-
     prompt = f"""
 반드시 한글(한국어)로 답변하라. 당신은 혁신 컨설턴트로서 CCM(크로스 카테고리 매트릭스) 방법론을 활용하여 창의적 아이디어를 도출합니다.
 
@@ -645,7 +641,7 @@ def idea_generator_app():
 
     # Set default session state
     if "ai_model" not in st.session_state:
-        st.session_state.ai_model = "gpt-4.1-mini"
+        st.session_state.ai_model = "gpt-4.1-mini"
     if "messages" not in st.session_state:
         st.session_state.messages = []
     if "auto_save" not in st.session_state:
@@ -668,8 +664,7 @@ def idea_generator_app():
     if web_search_enabled:
         sb.info("✅ Web search results will be integrated.")
 
-    # Example topics
-
+    # Example topics
     example_topics = {
         "example1": "도시 물 부족 문제 해결을 위한 혁신적 방안",
         "example2": "노인 돌봄 서비스의 디지털 전환",
@@ -685,7 +680,6 @@ def idea_generator_app():
         index=0  # default: "(None)"
     )
 
-
     sb.subheader("Example Prompts")
     c1, c2, c3 = sb.columns(3)
     if c1.button("도시 물 부족 문제", key="ex1"):
@@ -803,13 +797,10 @@ def idea_generator_app():
     sb.markdown("---")
     sb.markdown("Created by [Ginigen.com](https://ginigen.com) | [YouTube](https://www.youtube.com/@ginipickaistudio)")
 
-
 def process_example(topic):
     """Handle example prompts."""
     process_input(topic, [])
 
-# ── Revised process_input (complete) ─────────────────────────────────────
-
 # ──────────────────────── Helper: record results · download · auto-save ──────────
 def write_output(md_text: str, prompt: str):
     """
@@ -818,8 +809,7 @@ def write_output(md_text: str, prompt: str):
     • automatic JSON backup
     """
     # ① Append to the chat history
-    st.session_state.messages.append(
-        {"role": "assistant", "content": md_text})
+    st.session_state.messages.append({"role": "assistant", "content": md_text})
 
     # ② Download buttons
     st.subheader("Download This Output")
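The HTML download path relies on an `md_to_html` helper that this diff does not show. A plausible minimal version, assuming the third-party `markdown` package; the app's actual helper may differ:

```python
# Hypothetical md_to_html: wraps converted Markdown in a standalone page.
# The app's real helper is not shown in this diff.
import markdown  # pip install markdown

def md_to_html(md_text: str, title: str = "output") -> str:
    body = markdown.markdown(md_text, extensions=["tables", "fenced_code"])
    return (f"<!DOCTYPE html><html><head><meta charset='utf-8'>"
            f"<title>{title}</title></head><body>{body}</body></html>")
```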
@@ -840,21 +830,20 @@ def write_output(md_text: str, prompt: str):
     # ③ Automatic JSON save
     if st.session_state.auto_save:
         fn = f"chat_history_auto_{datetime.now():%Y%m%d_%H%M%S}.json"
-        with open(fn, "w", encoding="utf-8") as fp:
-            json.dump(
-                st.session_state.messages, fp,
-                ensure_ascii=False, indent=2
-            )
-
+        try:
+            with open(fn, "w", encoding="utf-8") as fp:
+                json.dump(st.session_state.messages, fp, ensure_ascii=False, indent=2)
+        except Exception as e:
+            logging.error(f"Auto-save failed: {e}")
 
-# ──────────────────────────────── process_input
+# ──────────────────────────────── process_input ────────────────────────────
 def process_input(prompt: str, uploaded_files):
     """
     1) Send the user input to GPT-4 to produce a creative-idea report
     2) Optionally generate an image
-    3) Record the result only once
+    3) Record, download, and back up the result only once (prevents duplicate output)
     """
-    #
+    # Record the user message
     if not any(m["role"] == "user" and m["content"] == prompt
                for m in st.session_state.messages):
         st.session_state.messages.append({"role": "user", "content": prompt})
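The backup files written above can be loaded back into the session verbatim. A small sketch of the reverse operation (the file name is a hypothetical example matching the auto-save pattern):

```python
# Reverse of the auto-save block: restore a saved chat history.
import json

def load_chat_history(path: str) -> list:
    """Load messages written by json.dump(st.session_state.messages, ...)."""
    with open(path, encoding="utf-8") as fp:
        return json.load(fp)

# e.g. messages = load_chat_history("chat_history_auto_20250101_120000.json")
```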
@@ -862,55 +851,47 @@ def process_input(prompt: str, uploaded_files):
     with st.chat_message("user"):
         st.markdown(prompt)
 
+    # State for the GPT call
+    use_web_search = st.session_state.web_search_enabled
+    has_uploaded = bool(uploaded_files)
+    full_response = ""
+
     with st.chat_message("assistant"):
+        # Placeholder for the live-streamed response
         message_placeholder = st.empty()
-        full_response = ""
-
-        use_web_search = st.session_state.web_search_enabled
-        has_uploaded = bool(uploaded_files)
 
         try:
-            status.update(label="Initializing model…")
-            client = get_openai_client()
+            with st.spinner("Preparing to generate ideas..."):
+                client = get_openai_client()
 
+            # Compose the system prompt
             selected_cat = st.session_state.get("category_focus", "(None)")
             if selected_cat == "(None)":
                 selected_cat = None
             sys_prompt = get_idea_system_prompt(selected_category=selected_cat)
 
+            # Category info (as JSON)
             def category_context(sel):
                 if sel:
                     return json.dumps(
                         {sel: physical_transformation_categories[sel]},
-                        ensure_ascii=False)
+                        ensure_ascii=False
+                    )
+                return "ALL_CATEGORIES: " + ", ".join(physical_transformation_categories.keys())
 
-            #
+            # Merge web-search results and file contents into the user message
+            user_content = prompt
             if use_web_search:
-                with st.spinner("Searching…"):
+                with st.spinner("Searching the web..."):
                     search_content = do_web_search(keywords(prompt, top=5))
+                    user_content += "\n\n" + search_content
-            file_content = None
             if has_uploaded:
-                with st.spinner("Processing files…"):
+                with st.spinner("Processing uploaded files..."):
                     file_content = process_uploaded_files(uploaded_files)
+                    if file_content:
+                        user_content += "\n\n" + file_content
 
-            user_content = prompt
-            if search_content:
-                user_content += "\n\n" + search_content
-            if file_content:
-                user_content += "\n\n" + file_content
-
+            # Conversation message array
             api_messages = [
                 {"role": "system", "content": sys_prompt},
                 {"role": "system", "name": "category_db",
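`category_context` behaves differently for a selected category versus `(None)`. A self-contained illustration with a tiny stand-in for `physical_transformation_categories`:

```python
# Self-contained illustration of category_context's two branches.
import json

physical_transformation_categories = {
    "센서 기능": ["시각 센서/감지", "청각 센서/감지"],
}

def category_context(sel):
    if sel:
        return json.dumps({sel: physical_transformation_categories[sel]},
                          ensure_ascii=False)
    return "ALL_CATEGORIES: " + ", ".join(physical_transformation_categories.keys())

print(category_context("센서 기능"))  # {"센서 기능": ["시각 센서/감지", "청각 센서/감지"]}
print(category_context(None))        # ALL_CATEGORIES: 센서 기능
```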
@@ -918,112 +899,44 @@ def process_input(prompt: str, uploaded_files):
                 {"role": "user", "content": user_content},
             ]
 
-            status.update(label="Ideas created!", state="complete")
-
-            # ── 3. Image generation (optional) ──────────────────────────
-            if st.session_state.generate_image and full_response:
-                ccm_match = re.search(
-                    r"###\s*이미지\s*프롬프트\s*\n+([^\n]+)",
-                    full_response, flags=re.IGNORECASE)
-                legacy_match = None
-                if not ccm_match:
-                    legacy_match = re.search(
-                        r"\|\s*(?:\*\*)?Image\s+Prompt(?:\*\*)?\s*\|\s*([^|\n]+)",
-                        full_response, flags=re.IGNORECASE) or \
-                        re.search(r"(?i)Image\s+Prompt\s*[:\-]\s*([^\n]+)",
-                                  full_response)
-                match = ccm_match or legacy_match
-                if match:
-                    raw_prompt = re.sub(r"[\r\n`\"'\\]", " ",
-                                        match.group(1)).strip()
-                    with st.spinner("아이디어 이미지 생성 중…"):
-                        img, cap = generate_image(raw_prompt)
-                    if img:
-                        st.image(img, caption=f"아이디어 시각화 – {cap}")
-                        st.session_state.messages.append({
-                            "role": "assistant",
-                            "content": "",
-                            "image": img,
-                            "image_caption": f"아이디어 시각화 – {cap}"
-                        })
-
-            # ── 4. Record·download·back up the result **only once** ─────
-            write_output(full_response, prompt)
-
-        except Exception as e:
-            # On exceptions: log, then notify the user only
-            logging.error("process_input error", exc_info=True)
-            placeholder.error(f"⚠️ 작업 중 오류가 발생했습니다: {e}")
-            st.session_state.messages.append(
-                {"role": "assistant", "content": f"⚠️ 오류: {e}"})
-
-# ---- Add category/item context (stray duplicate) ---------------------------
-def category_context(sel):
-    if sel:  # when a specific category is selected
-        return json.dumps(
-            {sel: physical_transformation_categories[sel]},
-            ensure_ascii=False)
-    # (None) → pass only the key list
-    return "ALL_CATEGORIES: " + ", ".join(
-        physical_transformation_categories.keys())
-
-            {"role": "system", "content": sys_prompt},
-            {"role": "system", "name": "category_db",
-             "content": category_context(selected_cat)},
-            {"role": "user", "content": user_content},
-        ]
-# --------------------------------------------------------------------------
-
-            # ④ OpenAI streaming call
-            status.update(label="Generating ideas…")
-            stream = client.chat.completions.create(
-                model="gpt-4.1-mini",
-                messages=api_messages,
-                temperature=1, max_tokens=MAX_TOKENS,
-                top_p=1, stream=True
-            )
-            for chunk in stream:
-                if chunk.choices and chunk.choices[0].delta.content:
-                    full_response += chunk.choices[0].delta.content
-                    message_placeholder.markdown(full_response + "▌")
+            # GPT-4 streaming call
+            with st.spinner("Generating ideas..."):
+                stream = client.chat.completions.create(
+                    model="gpt-4.1-mini",
+                    messages=api_messages,
+                    temperature=1,
+                    max_tokens=MAX_TOKENS,
+                    top_p=1,
+                    stream=True
+                )
+                for chunk in stream:
+                    token_text = chunk.choices[0].delta.content if chunk.choices else None
+                    if token_text:
+                        full_response += token_text
+                        message_placeholder.markdown(full_response + "▌")
+
+            # Final output
             message_placeholder.markdown(full_response)
-            status.update(label="Ideas created!", state="complete")
 
-            #
+            # Automatic image generation
             if st.session_state.generate_image and full_response:
-                #
-                ccm_match = re.search(
+                # Pattern 1: "### 이미지 프롬프트" heading format
+                ccm_match = re.search(r"###\s*이미지\s*프롬프트\s*\n+([^\n]+)",
+                                      full_response, flags=re.IGNORECASE)
+                # Pattern 2: legacy "Image Prompt" formats
                 legacy_match = None
                 if not ccm_match:
                     legacy_match = re.search(
                         r"\|\s*(?:\*\*)?Image\s+Prompt(?:\*\*)?\s*\|\s*([^|\n]+)",
-                        full_response, flags=re.IGNORECASE)
+                        full_response, flags=re.IGNORECASE
+                    ) or re.search(
+                        r"(?i)Image\s+Prompt\s*[:\-]\s*([^\n]+)",
+                        full_response
+                    )
                 match = ccm_match or legacy_match
                 if match:
-                    raw_prompt = re.sub(r"[\r\n`\"'\\]", " ",
-                                        match.group(1)).strip()
-                    with st.spinner("아이디어 이미지 생성 중…"):
+                    raw_prompt = re.sub(r"[\r\n`\"'\\]", " ", match.group(1)).strip()
+                    with st.spinner("Generating idea image..."):
                         img, cap = generate_image(raw_prompt)
                     if img:
                         st.image(img, caption=f"아이디어 시각화 – {cap}")
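The streaming loop above relies on attribute access into the chunk deltas: with the openai>=1.0 SDK, `chunk.choices[0].delta` is a pydantic object rather than a dict, so dict-style `.get("content")` would raise. In isolation the pattern is:

```python
# Attribute-style delta access for streamed chat completions (openai>=1.0).
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY in the environment
stream = client.chat.completions.create(
    model="gpt-4.1-mini",
    messages=[{"role": "user", "content": "Say hello."}],
    stream=True,
)
text = ""
for chunk in stream:
    if chunk.choices and chunk.choices[0].delta.content:
        text += chunk.choices[0].delta.content
print(text)
```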
@@ -1034,48 +947,14 @@ def process_input(prompt: str, uploaded_files):
                             "image_caption": f"아이디어 시각화 – {cap}"
                         })
 
-            placeholder.error(f"Error: {err}")
-            logging.error(err)
-            st.session_state.messages.append({
-                "role": "assistant",
-                "content": f"⚠️ 작업 중 오류가 발생했습니다: {err}"
-            })
-
-        # Download buttons
-        if full_response:
-            st.subheader("Download This Output")
-            c1, c2 = st.columns(2)
-            c1.download_button(
-                "Markdown",
-                data=full_response,
-                file_name=f"{prompt[:30]}.md",
-                mime="text/markdown"
-            )
-            c2.download_button(
-                "HTML",
-                data=md_to_html(full_response, prompt[:30]),
-                file_name=f"{prompt[:30]}.html",
-                mime="text/html"
-            )
-
-        # Auto-save
-        if st.session_state.auto_save and st.session_state.messages:
-            try:
-                fn = f"chat_history_auto_{datetime.now():%Y%m%d_%H%M%S}.json"
-                with open(fn, "w", encoding="utf-8") as fp:
-                    json.dump(st.session_state.messages, fp, ensure_ascii=False, indent=2)
-            except Exception as e:
-                logging.error(f"Auto-save failed: {e}")
+            # Record · download · back up the result
+            write_output(full_response, prompt)
 
         except Exception as e:
-            st.session_state.messages.append({"role": "assistant", "content": ans})
+            logging.error("Error in process_input", exc_info=True)
+            err_msg = f"⚠️ 오류가 발생했습니다: {e}"
+            st.error(err_msg)
+            st.session_state.messages.append({"role": "assistant", "content": err_msg})
 
 # ──────────────────────────────── main ────────────────────────────────────
 def main():
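The two regular expressions that pull an image prompt out of the model's answer can be exercised on their own; the sample strings below are made up for illustration:

```python
# The two image-prompt patterns from process_input, run on made-up samples.
import re

sample = "### 이미지 프롬프트\na futuristic water-recycling tower, isometric view\n"
ccm = re.search(r"###\s*이미지\s*프롬프트\s*\n+([^\n]+)", sample, flags=re.IGNORECASE)
print(ccm.group(1))  # a futuristic water-recycling tower, isometric view

legacy = "| **Image Prompt** | a solar-powered desalination barge |"
m = re.search(r"\|\s*(?:\*\*)?Image\s+Prompt(?:\*\*)?\s*\|\s*([^|\n]+)",
              legacy, flags=re.IGNORECASE)
print(m.group(1).strip())  # a solar-powered desalination barge
```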
@@ -1083,4 +962,3 @@ def main():
 
 if __name__ == "__main__":
     main()
-