|
|
import os |
|
|
import asyncio |
|
|
import streamlit as st |
|
|
from openai import AsyncOpenAI |
|
|
|
|
|
|
|
|
# Credentials: the app cannot run without an OpenAI key, so halt the page
# render early with a clear message instead of failing later mid-request.
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
    st.error("OpenAI API key not found. Please set the OPENAI_API_KEY environment variable.")
    st.stop()

# One shared async client, reused by every generation helper below.
client = AsyncOpenAI(api_key=api_key)
|
|
|
|
|
async def generate_stream(prompt, model="gpt-4o", max_tokens=2048):
    """Stream a chat completion and yield the response text piece by piece.

    Args:
        prompt: User message, sent as a single-turn conversation.
        model: Chat model name forwarded to the API.
        max_tokens: Upper bound on the number of generated tokens.

    Yields:
        str: Each non-empty content delta, in arrival order.
    """
    response = await client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        max_tokens=max_tokens,
        stream=True,
    )
    async for chunk in response:
        # Some stream events carry an empty `choices` list (e.g. the trailing
        # usage chunk when stream_options include_usage is enabled); indexing
        # those would raise IndexError, so skip them.
        if not chunk.choices:
            continue
        delta_obj = chunk.choices[0].delta
        content = getattr(delta_obj, "content", None)
        if content:
            yield content
|
|
|
|
|
async def generate_content(prompt, model="gpt-4o", max_tokens=2048):
    """Stream a completion into a Streamlit placeholder and return the full text.

    The placeholder is rewritten with the accumulated text after every delta,
    producing a live "typing" effect; the assembled string is returned.
    """
    placeholder = st.empty()
    pieces = []
    async for piece in generate_stream(prompt, model, max_tokens):
        pieces.append(piece)
        placeholder.write("".join(pieces))
    return "".join(pieces)
|
|
|
|
|
async def generate_context(summaries, system_prompt_prefix, system_prompt_suffix):
    """Show a section header, then stream-generate a context from the summaries."""
    st.subheader("Context Generation")
    full_prompt = "\n\n".join([system_prompt_prefix, summaries, system_prompt_suffix])
    return await generate_content(full_prompt)
|
|
|
|
|
async def generate_script(context, heading, system_prompt_prefix, system_prompt_suffix):
    """Show a per-heading header, then stream-generate a script for that heading."""
    st.subheader(f"Script for: {heading}")
    parts = [system_prompt_prefix, context, f"Heading: {heading}", system_prompt_suffix]
    return await generate_content("\n\n".join(parts), max_tokens=4000)
|
|
|
|
|
def main():
    """Streamlit entry point: collect prompts, then stream context and scripts."""
    st.title("Script Generator (Streaming & Async)")

    # --- Context-generation inputs ---
    context_system_prompt_prefix = st.text_input(
        "Context Prompt Prefix:",
        value="You are an AI assistant. Provide a concise context from the summary.",
    )
    summaries = st.text_area("Document Summary:", height=150)
    context_system_prompt_suffix = st.text_input(
        "Context Prompt Suffix:",
        value="Focus on key points and present clearly.",
    )

    # --- Script-generation inputs ---
    outline = st.text_area("Content Outline (one heading per line):", height=150)
    script_system_prompt_prefix = st.text_input(
        "Script Prompt Prefix:",
        value="You are an AI scriptwriter. Generate a detailed script.",
    )
    script_system_prompt_suffix = st.text_input(
        "Script Prompt Suffix:",
        value="Make it informative, engaging, >600 characters.",
    )

    # Guard clauses: do nothing until the button is pressed and both
    # free-text fields have content.
    if not st.button("Generate (Streaming)"):
        return
    if not (summaries and outline):
        st.error("Please enter both the summary and the outline.")
        return

    # Each asyncio.run drives one streamed generation to completion so the
    # page updates live while the script reruns top-to-bottom.
    context = asyncio.run(
        generate_context(
            summaries,
            context_system_prompt_prefix,
            context_system_prompt_suffix,
        )
    )

    if context:
        st.markdown("**Generated Context:**")
        st.write(context)

    # One script section per non-blank outline line.
    for raw_heading in outline.split("\n"):
        heading = raw_heading.strip()
        if not heading:
            continue
        asyncio.run(
            generate_script(
                context,
                heading,
                script_system_prompt_prefix,
                script_system_prompt_suffix,
            )
        )
|
|
|
|
|
# Standard script entry guard; under `streamlit run` the module-level code
# and main() execute on every rerun of the page.
if __name__ == "__main__":
    main()
|
|
|