stream true
Browse files
app.py
CHANGED
|
@@ -11,75 +11,92 @@ if not api_key:
|
|
| 11 |
|
| 12 |
client = AsyncOpenAI(api_key=api_key)
|
| 13 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 14 |
async def generate_content(prompt, model="gpt-4o", max_tokens=2048):
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
|
| 26 |
async def generate_context(summaries, system_prompt_prefix, system_prompt_suffix):
|
| 27 |
prompt = f"{system_prompt_prefix}\n\n{summaries}\n\n{system_prompt_suffix}"
|
|
|
|
| 28 |
return await generate_content(prompt)
|
| 29 |
|
| 30 |
async def generate_script(context, heading, system_prompt_prefix, system_prompt_suffix):
|
| 31 |
prompt = f"{system_prompt_prefix}\n\n{context}\n\nHeading: {heading}\n\n{system_prompt_suffix}"
|
|
|
|
| 32 |
return await generate_content(prompt, max_tokens=4000)
|
| 33 |
|
| 34 |
def main():
|
| 35 |
-
st.title("Script Generator")
|
| 36 |
|
|
|
|
| 37 |
context_system_prompt_prefix = st.text_input(
|
| 38 |
-
"
|
| 39 |
-
value="You are an AI assistant.
|
| 40 |
)
|
| 41 |
-
summaries = st.text_area("
|
| 42 |
context_system_prompt_suffix = st.text_input(
|
| 43 |
-
"
|
| 44 |
-
value="Focus on
|
| 45 |
)
|
| 46 |
|
| 47 |
-
|
|
|
|
| 48 |
script_system_prompt_prefix = st.text_input(
|
| 49 |
-
"
|
| 50 |
-
value="You are an AI scriptwriter.
|
| 51 |
)
|
| 52 |
script_system_prompt_suffix = st.text_input(
|
| 53 |
-
"
|
| 54 |
-
value="
|
| 55 |
)
|
| 56 |
|
| 57 |
-
if st.button("Generate
|
| 58 |
if not summaries or not outline:
|
| 59 |
-
st.error("Please enter both the
|
| 60 |
return
|
| 61 |
|
| 62 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 63 |
if context:
|
| 64 |
-
st.
|
| 65 |
st.write(context)
|
| 66 |
|
| 67 |
headings = [h.strip() for h in outline.split("\n") if h.strip()]
|
| 68 |
-
scripts = []
|
| 69 |
for heading in headings:
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
for heading, script in scripts:
|
| 77 |
-
st.subheader(heading)
|
| 78 |
-
st.write(script)
|
| 79 |
-
else:
|
| 80 |
-
st.error("Failed to generate scripts. Please try again.")
|
| 81 |
-
else:
|
| 82 |
-
st.error("Failed to generate context. Please try again.")
|
| 83 |
|
| 84 |
if __name__ == "__main__":
|
| 85 |
-
main()
|
|
|
|
| 11 |
|
| 12 |
client = AsyncOpenAI(api_key=api_key)
|
| 13 |
|
| 14 |
+
async def generate_stream(prompt, model="gpt-4o", max_tokens=2048):
    """Stream a chat completion and yield the response text chunk by chunk.

    Args:
        prompt: User message sent as a single-turn chat prompt.
        model: Chat-completion model name.
        max_tokens: Upper bound on generated tokens.

    Yields:
        str: Incremental content deltas; empty strings for control chunks.
    """
    response = await client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        max_tokens=max_tokens,
        stream=True,
    )
    async for chunk in response:
        # Some stream events (e.g. usage/终 chunks) carry no choices at all.
        if not chunk.choices:
            continue
        # openai>=1.0 (which provides AsyncOpenAI) returns pydantic models:
        # `delta` is a ChoiceDelta object with no .get() method. Read the
        # attribute and coerce None (role/finish-reason chunks) to "".
        yield chunk.choices[0].delta.content or ""
|
| 28 |
+
|
| 29 |
async def generate_content(prompt, model="gpt-4o", max_tokens=2048):
    """Accumulate the streamed completion into one string, live-updating the UI.

    A Streamlit placeholder is refreshed after every delta so the user sees
    the text grow in real time; the final full text is returned.
    """
    placeholder = st.empty()
    pieces = []
    async for delta in generate_stream(prompt, model, max_tokens):
        pieces.append(delta)
        # Re-render everything received so far on each chunk.
        placeholder.write("".join(pieces))
    return "".join(pieces)
|
| 40 |
|
| 41 |
async def generate_context(summaries, system_prompt_prefix, system_prompt_suffix):
    """Announce the section header and stream a context built from *summaries*."""
    st.subheader("Context Generation")
    full_prompt = "\n\n".join([system_prompt_prefix, summaries, system_prompt_suffix])
    return await generate_content(full_prompt)
|
| 45 |
|
| 46 |
async def generate_script(context, heading, system_prompt_prefix, system_prompt_suffix):
    """Announce the heading and stream a script section for it."""
    st.subheader(f"Script for: {heading}")
    script_prompt = "\n\n".join(
        [system_prompt_prefix, context, f"Heading: {heading}", system_prompt_suffix]
    )
    return await generate_content(script_prompt, max_tokens=4000)
|
| 50 |
|
| 51 |
def main():
    """Streamlit entry point: collect prompts, then stream context and scripts.

    Renders input widgets, and on button press generates a context from the
    summary followed by one streamed script per outline heading.
    """
    st.title("Script Generator (Streaming & Async)")

    # --- Context generation inputs ---
    context_system_prompt_prefix = st.text_input(
        "Context Prompt Prefix:",
        value="You are an AI assistant. Provide a concise context from the summary."
    )
    summaries = st.text_area("Document Summary:", height=150)
    context_system_prompt_suffix = st.text_input(
        "Context Prompt Suffix:",
        value="Focus on key points and present clearly."
    )

    # --- Script generation inputs ---
    outline = st.text_area("Content Outline (one heading per line):", height=150)
    script_system_prompt_prefix = st.text_input(
        "Script Prompt Prefix:",
        value="You are an AI scriptwriter. Generate a detailed script."
    )
    script_system_prompt_suffix = st.text_input(
        "Script Prompt Suffix:",
        value="Make it informative, engaging, >600 characters."
    )

    if st.button("Generate (Streaming)"):
        if not summaries or not outline:
            st.error("Please enter both the summary and the outline.")
            return

        # asyncio.run drives the async pipeline synchronously — one fresh
        # event loop per call, which fits Streamlit's rerun-per-interaction
        # execution model (no loop is left running between reruns).
        context = asyncio.run(generate_context(
            summaries,
            context_system_prompt_prefix,
            context_system_prompt_suffix
        ))

        if not context:
            # Fix: an empty/failed context previously fell through silently
            # with no feedback; surface the failure to the user instead.
            st.error("Failed to generate context. Please try again.")
            return

        st.markdown("**Generated Context:**")
        st.write(context)

        headings = [h.strip() for h in outline.split("\n") if h.strip()]
        for heading in headings:
            # Each script streams into its own placeholder inside
            # generate_script; the return value is already displayed.
            asyncio.run(generate_script(
                context,
                heading,
                script_system_prompt_prefix,
                script_system_prompt_suffix
            ))


if __name__ == "__main__":
    main()
|