# Hugging Face Space page residue (status: "Sleeping") — not part of the app code.
# --- App setup: imports, page config, sidebar controls, tabs, history ------
import os
import re

import streamlit as st

from llm_groq import generate_post, transform_post, generate_hooks, DEFAULT_MODEL
from prompts import build_quick_prompt, build_post_prompt, transform_instruction
from data_utils import load_posts, extract_keywords, dedupe_sentences, strip_labels
from ui_components import quick_controls, pro_controls

st.set_page_config(page_title="LinkedIn Post Generator — Groq", layout="centered")
st.title("LinkedIn Post Generator — Quick & Pro ")

# Sidebar: model choice and decoding parameters shared by every tab.
with st.sidebar:
    st.subheader("Groq & Decoding")
    model = st.selectbox(
        "Model",
        [DEFAULT_MODEL, "llama-3.1-8b-instant", "mixtral-8x7b-32768"],
        index=0,
        key="sb_model",
    )
    temperature = st.slider("Temperature", 0.1, 1.2, 0.6, 0.05, key="sb_temp")
    top_p = st.slider("Top‑p", 0.1, 1.0, 0.9, 0.05, key="sb_topp")
    st.markdown("Set GROQ_API_KEY in Space → Settings → Variables & Secrets.")

tabs = st.tabs(["Quick Draft", "Pro Mode", "History"])

# Saved drafts survive Streamlit reruns via session state.
if "history" not in st.session_state:
    st.session_state.history = []
def quick_quality_fix(text, want_hashtags=True, allow_emoji=True):
    """Post-process a generated post: enforce shape, emoji, and hashtag rules.

    Args:
        text: Raw generated post text.
        want_hashtags: If True, keep at most the first two hashtags; if False,
            remove every hashtag.
        allow_emoji: If False, strip emoji and other exotic symbols while
            preserving common punctuation.

    Returns:
        The cleaned text, or None when the post does not have 4-7 non-empty
        lines (signals the caller to regenerate).
    """
    lines = [ln for ln in text.strip().splitlines() if ln.strip()]
    if not 4 <= len(lines) <= 7:
        return None  # wrong shape — caller should regenerate

    if not allow_emoji:
        # Keep word chars, whitespace, '#', and common punctuation — including
        # '!' and '?', which the previous pattern wrongly deleted.
        text = re.sub(r"[^\w\s#.,:;%&()\-\+\[\]{}'\"/!?]", "", text)

    keep = 2 if want_hashtags else 0
    seen = 0

    def _tag_filter(match):
        # Keep only the first `keep` hashtag occurrences, in document order.
        nonlocal seen
        seen += 1
        return match.group(0) if seen <= keep else ""

    if len(re.findall(r"#\w+", text)) > keep:
        # Regex-based removal avoids the str.replace bug where deleting "#ai"
        # corrupted prefix-sharing tags (e.g. "#aiagents" -> "agents").
        text = re.sub(r"#\w+", _tag_filter, text)
        text = re.sub(r"[ \t]{2,}", " ", text)  # collapse residual gaps
    return text.strip()
# --- Quick Draft tab -------------------------------------------------------
with tabs[0]:
    idea, tone, words, variations, include_emoji, add_hashtags, language = quick_controls()
    if st.button("Generate", key="qd_generate"):
        if not os.getenv("GROQ_API_KEY"):
            st.error("GROQ_API_KEY missing.")
        elif not idea.strip():
            st.warning("Enter your idea.")
        else:
            base_prompt = build_quick_prompt(idea, tone, words, include_emoji, add_hashtags, language)
            # Token budget scales with the requested word count, clamped to a sane range.
            budget = max(200, min(1200, int(words * 1.6) + 120))

            def _cleaned(raw_text):
                # Normalize model output before quality checks.
                return dedupe_sentences(strip_labels(raw_text))

            drafts = []
            with st.spinner("Generating…"):
                try:
                    for _ in range(variations):
                        candidate = quick_quality_fix(
                            _cleaned(generate_post(base_prompt, model, temperature, top_p, budget)),
                            want_hashtags=add_hashtags,
                            allow_emoji=include_emoji,
                        )
                        if candidate is None:
                            # Wrong shape — retry once with an explicit
                            # structural instruction appended to the prompt.
                            retry_prompt = (
                                base_prompt
                                + f"\n\nRegenerate a full LinkedIn post around {words} words total, "
                                "structured in 4–6 short paragraphs (each 2–3 lines). "
                                "Keep it scannable, professional, and engaging. "
                                "include one concrete metric or date, "
                                f"{'max 5 emoji' if include_emoji else 'no emojis'}, "
                                f"{'1–2 niche hashtags at the end' if add_hashtags else 'no hashtags'}."
                            )
                            second = _cleaned(generate_post(retry_prompt, model, temperature, top_p, budget))
                            candidate = quick_quality_fix(second, want_hashtags=add_hashtags, allow_emoji=include_emoji) or second
                        drafts.append(candidate)
                except Exception as e:
                    st.error(f"Generation failed: {e}")
                    drafts = []
            for idx, draft in enumerate(drafts, start=1):
                st.markdown(f"#### Post {idx}")
                st.write(draft)
                st.download_button(f"Download Post {idx}", draft, file_name=f"post_{idx}.txt", key=f"qd_dl_{idx}")
            if drafts:
                st.session_state.history.append(
                    {"mode": "quick", "idea": idea, "tone": tone, "words": words, "posts": drafts}
                )
# --- Pro Mode tab ----------------------------------------------------------
with tabs[1]:
    st.markdown("Upload CSV/JSON of past posts (must include 'text') to auto-extract keywords (optional).")
    uploaded = st.file_uploader("Upload dataset", type=["csv", "json"], key="pro_upload")
    defaults = {
        "topic": "AI agent playbooks for startup ops",
        "audience": "SaaS founders in early stage",
    }
    topic, purpose, audience, tone2, language2, evidence, style_text = pro_controls(defaults)

    # Optional: mine keywords from the uploaded post history.
    keywords = []
    if uploaded is not None:
        try:
            df = load_posts(uploaded)
            keywords = extract_keywords(topic, df)
            st.success(f"Loaded {len(df)} posts. Extracted keywords: {', '.join(keywords[:8]) or '—'}")
        except Exception as e:
            st.error(f"Dataset error: {e}")

    if st.button("Suggest 5 hooks", key="pro_hooks_btn"):
        try:
            st.code(generate_hooks(topic, audience, tone2, 5, model, temperature, top_p, 200))
        except Exception as e:
            st.error(f"Hook generation failed: {e}")

    chosen_hook = st.text_input("Chosen opening line (optional)", key="pro_chosen_hook")
    outcome = st.text_input("Desired outcome (e.g., 10 demo requests this week)", value="", key="pro_outcome")
    extra_detail = st.text_input("One concrete detail (e.g., 'onboarding 14→3 days')", value="", key="pro_detail")

    note_parts = [
        f"Outcome: {outcome}" if outcome else "",
        f"Detail: {extra_detail}" if extra_detail else "",
    ]
    clarifier_notes = "\n".join(note_parts).strip()
    style_cues = [cue.strip() for cue in style_text.splitlines() if cue.strip()][:4]

    if st.button("Generate Post (Pro)", key="pro_generate"):
        if not os.getenv("GROQ_API_KEY"):
            st.error("GROQ_API_KEY missing.")
        else:
            try:
                pro_prompt = build_post_prompt(topic, language2, tone2, 160, purpose, audience, evidence, keywords, style_cues, clarifier_notes, chosen_hook)
                post = dedupe_sentences(strip_labels(generate_post(pro_prompt, model, temperature, top_p, 800)))
                st.success("Post")
                st.write(post)
                st.download_button("Download (.txt)", post, file_name="linkedin_post.txt", key="pro_download")
                st.session_state.history.append({"mode": "pro", "topic": topic, "audience": audience, "post": post})
            except Exception as e:
                st.error(f"Generation failed: {e}")

    # Refinements: each button rewrites the most recent pro draft in place.
    col1, col2, col3, col4, col5 = st.columns(5)

    def refine(kind):
        # Only act when the latest history entry is a pro draft (has "post").
        history = st.session_state.get("history")
        if not (history and history[-1].get("post")):
            return
        try:
            instr = transform_instruction(kind)
            rewritten = transform_post(instr, history[-1]["post"], model, temperature, top_p, 500)
            history[-1]["post"] = dedupe_sentences(strip_labels(rewritten))
        except Exception as e:
            st.error(f"Refinement failed: {e}")

    if col1.button("Shorter", key="pro_shorter"):
        refine("shorter")
    if col2.button("Punchier hook", key="pro_punchy"):
        refine("punchier")
    if col3.button("Add data point", key="pro_adddata"):
        refine("add_data")
    if col4.button("No emojis", key="pro_noemoji"):
        refine("less_emoji")
    if col5.button("Add hashtags", key="pro_addtags"):
        refine("add_tags")

    # Show the (possibly just-refined) latest pro draft.
    if st.session_state.get("history") and st.session_state.history[-1].get("post"):
        st.write(st.session_state.history[-1]["post"])
# --- History tab -----------------------------------------------------------
with tabs[2]:
    saved = st.session_state.history
    if not saved:
        st.info("No saved drafts yet.")
    else:
        # Newest drafts first.
        for n, entry in enumerate(reversed(saved), start=1):
            st.markdown(f"#### Draft {n} ({entry.get('mode')})")
            if "posts" in entry:
                # Quick drafts store a list of variations.
                for k, variant in enumerate(entry["posts"], start=1):
                    st.markdown(f"Post {k}")
                    st.write(variant)
            else:
                # Pro drafts store a single post.
                st.write(entry.get("post", ""))