File size: 3,658 Bytes
1863a9b 73bc47c 1863a9b 73bc47c 1863a9b 73bc47c 2cbe1ec ac891f5 e28e99e ac891f5 889fd1a ac891f5 1863a9b 73bc47c ac891f5 73bc47c 1863a9b 73bc47c ac891f5 73bc47c f0a7d18 73bc47c ac891f5 1863a9b ac891f5 73bc47c ac891f5 73bc47c ac891f5 73bc47c ac891f5 73bc47c 1863a9b ac891f5 73bc47c ac891f5 73bc47c ac891f5 73bc47c 1863a9b ac891f5 73bc47c ac891f5 73bc47c ac891f5 73bc47c ac891f5 73bc47c ac891f5 1863a9b 73bc47c ac891f5 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 |
import os
import asyncio
import streamlit as st
from openai import AsyncOpenAI
# OpenAI API key setup: read the key from the environment and halt the
# Streamlit app early (st.stop) if it is missing.
api_key = os.environ.get("OPENAI_API_KEY")
if not api_key:
    st.error("OpenAI API key not found. Please set the OPENAI_API_KEY environment variable.")
    st.stop()
# Shared async client reused by every request coroutine below.
client = AsyncOpenAI(api_key=api_key)
async def generate_stream(prompt, model="gpt-4o", max_tokens=2048):
    """Stream a chat completion and yield the response text piece by piece.

    Args:
        prompt: User message sent to the chat model.
        model: Chat model identifier.
        max_tokens: Upper bound on generated tokens.

    Yields:
        str: Each non-empty content delta as it arrives from the API.
    """
    stream = await client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        max_tokens=max_tokens,
        stream=True
    )
    async for event in stream:
        # The delta is a ChoiceDelta object; .content is None on
        # role/metadata-only events, which are skipped here.
        piece = getattr(event.choices[0].delta, "content", None)
        if piece:
            yield piece
async def generate_content(prompt, model="gpt-4o", max_tokens=2048):
    """Stream a completion into a Streamlit placeholder, returning the full text.

    The placeholder is re-rendered on every chunk so the user sees the
    response grow live; the complete assembled string is returned.
    """
    placeholder = st.empty()
    pieces = []
    async for piece in generate_stream(prompt, model, max_tokens):
        pieces.append(piece)
        # Re-render the accumulated text on each delta for live streaming.
        placeholder.write("".join(pieces))
    return "".join(pieces)
async def generate_context(summaries, system_prompt_prefix, system_prompt_suffix):
    """Render the context section header and stream a context from the summaries."""
    st.subheader("Context Generation")
    combined = "\n\n".join([system_prompt_prefix, summaries, system_prompt_suffix])
    return await generate_content(combined)
async def generate_script(context, heading, system_prompt_prefix, system_prompt_suffix):
    """Render a per-heading header and stream a script for that heading."""
    st.subheader(f"Script for: {heading}")
    parts = [system_prompt_prefix, context, f"Heading: {heading}", system_prompt_suffix]
    return await generate_content("\n\n".join(parts), max_tokens=4000)
def main():
    """Streamlit entry point: collect prompts, then stream context and scripts.

    Fix: the original called ``asyncio.run()`` once for the context and again
    for every heading. Each call creates and tears down a fresh event loop,
    while the module-level ``AsyncOpenAI`` client keeps loop-bound HTTP
    connections — a known recipe for "Event loop is closed" errors after the
    first run. The whole pipeline now runs under a single ``asyncio.run()``.
    """
    st.title("Script Generator (Streaming & Async)")

    # --- Context generation inputs ---
    context_system_prompt_prefix = st.text_input(
        "Context Prompt Prefix:",
        value="You are an AI assistant. Provide a concise context from the summary."
    )
    summaries = st.text_area("Document Summary:", height=150)
    context_system_prompt_suffix = st.text_input(
        "Context Prompt Suffix:",
        value="Focus on key points and present clearly."
    )

    # --- Script generation inputs ---
    outline = st.text_area("Content Outline (one heading per line):", height=150)
    script_system_prompt_prefix = st.text_input(
        "Script Prompt Prefix:",
        value="You are an AI scriptwriter. Generate a detailed script."
    )
    script_system_prompt_suffix = st.text_input(
        "Script Prompt Suffix:",
        value="Make it informative, engaging, >600 characters."
    )

    if st.button("Generate (Streaming)"):
        if not summaries or not outline:
            st.error("Please enter both the summary and the outline.")
            return

        async def _run_pipeline():
            # One coroutine for the whole flow so a single event loop
            # services every request made through the shared client.
            context = await generate_context(
                summaries,
                context_system_prompt_prefix,
                context_system_prompt_suffix
            )
            if context:
                st.markdown("**Generated Context:**")
                st.write(context)
                # One heading per non-blank outline line.
                headings = [h.strip() for h in outline.split("\n") if h.strip()]
                for heading in headings:
                    await generate_script(
                        context,
                        heading,
                        script_system_prompt_prefix,
                        script_system_prompt_suffix
                    )

        # Single synchronous bridge into asyncio for the entire pipeline.
        asyncio.run(_run_pipeline())
# Run the Streamlit app when executed as a script (not when imported).
if __name__ == "__main__":
    main()
|