import streamlit as st
import requests
import time
from openai import OpenAI
from prompts import SYSTEM_MESSAGE, USER_MESSAGE
import json
from bs4 import BeautifulSoup
import os
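
# prompts.py is not included here. Based on how it is used below, a minimal sketch of its
# interface might look like this (illustrative wording only; the real prompt text lives in
# prompts.py):
#
#   SYSTEM_MESSAGE = "You are a content planner. Reply with a single JSON object."
#   USER_MESSAGE = "Create a Public Blueprints content plan for this source:\n\n{source_content}"
#
# USER_MESSAGE needs a {source_content} placeholder because it is filled in later with
# USER_MESSAGE.format(source_content=transcript).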

# Set Streamlit layout to wide mode
st.set_page_config(layout="wide")

st.title("🎬 AI-Powered Content Planner - Public Blueprints")
st.markdown("Paste source content to generate a Public Blueprints Content Plan.")
# Placeholder at the very top for the response time
time_placeholder = st.empty()

# API key is read from the environment; available models are listed below
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
DEFAULT_MODEL = "gpt-4o-mini"
GROQ_MODELS = ["llama-3.3-70b-specdec", "llama-3.3-70b-versatile", "llama-3.1-8b-instant"]
# Sidebar: model selection
st.sidebar.subheader("⚙️ Settings")
MODEL = st.sidebar.selectbox(
    "Choose a model:",
    [DEFAULT_MODEL, "gpt-4o", "o3-mini"] + GROQ_MODELS,
    index=0,  # Default selection
)
if not OPENAI_API_KEY:
    st.warning("⚠️ OPENAI_API_KEY is not set. Please add it to your environment.")
    st.stop()

# Groq models go through Groq's OpenAI-compatible endpoint (requires GROQ_API_KEY);
# everything else uses the OpenAI API directly.
if MODEL in GROQ_MODELS:
    client = OpenAI(base_url="https://api.groq.com/openai/v1", api_key=os.environ.get("GROQ_API_KEY"))
else:
    client = OpenAI(api_key=OPENAI_API_KEY)
# Helper function to fetch blog content from a URL
def fetch_blog_content(url):
    try:
        res = requests.get(url, timeout=10)
        res.raise_for_status()  # Raise an error for bad status codes
        soup = BeautifulSoup(res.text, "html.parser")
        # A simple extraction: get all text within <p> tags
        paragraphs = soup.find_all("p")
        content = "\n\n".join(p.get_text() for p in paragraphs)
        return content.strip()
    except Exception as e:
        st.error(f"Error fetching blog content: {e}")
        return ""

# Layout: Left (Input) | Right (Output)
col1, col2 = st.columns([1, 1])

with col1:
    st.subheader("📝 Input Source")
    blog_url = st.text_input("Enter a blog URL (optional):", key="blog_url")

    # Initialize fetched_content as an empty string
    fetched_content = ""
    if blog_url:
        fetched_content = fetch_blog_content(blog_url)
        if fetched_content:
            st.success("Blog content fetched successfully!")
        else:
            st.warning("Unable to fetch blog content. Please paste your content manually.")

    # The text area uses fetched_content as its default value
    transcript = st.text_area("Or paste your content here:", value=fetched_content, height=400, key="transcript")
with col2:
    st.subheader("📋 Generated Content Plan")
    generated_plan_container = st.container()
    generate_button = st.button("Generate Plan")
if generate_button:
    if not transcript.strip():
        st.error("❌ Please enter a transcript.")
    else:
        with st.spinner("⏳ Generating content plan... Please wait."):
            try:
                # Build the prompts
                system_prompt = SYSTEM_MESSAGE
                user_prompt = USER_MESSAGE.format(source_content=transcript)

                messages = [
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_prompt},
                ]

                # Placeholder inside the output container (the response is not streamed)
                generated_plan_container.empty()
                openai_args = {
                    "model": MODEL,
                    "messages": messages,
                    "response_format": {"type": "json_object"},
                }
                if MODEL == "o3-mini":
                    # Reasoning models take a reasoning_effort setting instead of sampling params
                    openai_args["reasoning_effort"] = "low"
                else:
                    openai_args["max_tokens"] = 5000
                    openai_args["temperature"] = 0.45

                # Call the chat completions API (non-streaming) and time the request
                start_time = time.time()
                response = client.chat.completions.create(**openai_args)
                end_time = time.time()
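
                # Illustrative only: the prompts are expected to make the model return one JSON
                # object whose single top-level key maps to a list of plan dicts. The key name
                # below is an assumption; only the per-plan field names are relied on by the code:
                #
                #   {"Blueprint Plans": [
                #       {"Blueprint": "...", "Description": "...", "Content Focus": "...",
                #        "Narrative Thread": "...", "Rationale": "..."},
                #       ...
                #   ]}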
                # Parse the JSON response
                generated_response = response.choices[0].message.content.strip()
                content_plan = json.loads(generated_response)

                # Extract the plan list (assuming there is only one key in the JSON response)
                plan_key = list(content_plan.keys())[0]
                blueprint_plans = content_plan.get(plan_key, [])

                # Display time taken
                elapsed_time = end_time - start_time
                time_placeholder.markdown(f"#### ⏱️ Response Time: **{elapsed_time:.2f} seconds**")

                # Display final output
                if blueprint_plans:
                    with generated_plan_container.container():
                        for i, plan in enumerate(blueprint_plans):
                            st.markdown(f"### 🎬 Blueprint Plan {i + 1}")
                            st.write(f"**Blueprint:** {plan.get('Blueprint', 'N/A')}")
                            st.write(f"**Description:** {plan.get('Description', 'N/A')}")
                            st.write(f"**Content Focus:** {plan.get('Content Focus', 'N/A')}")
                            st.write(f"**Narrative Thread:** {plan.get('Narrative Thread', 'N/A')}")
                            st.write(f"**Rationale:** {plan.get('Rationale', 'N/A')}")
                            st.markdown("---")
                else:
                    st.error("⚠️ No plans were generated. Try again.")
            except json.JSONDecodeError:
                st.error("⚠️ Failed to parse the model's JSON response. Try again.")
            except Exception as e:
                st.error(f"❌ Error: {str(e)}")