Alpha108 committed on
Commit
2f6fb2e
·
verified ·
1 Parent(s): 53f4745

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +50 -17
app.py CHANGED
@@ -13,9 +13,9 @@ def load_style_samples():
13
 
14
  @st.cache_resource(show_spinner=False)
15
  def load_pipeline():
16
- # Choose a lightweight, CPU-friendly model for Spaces
17
  model_id = "google/flan-t5-base"
18
- # IMPORTANT: Do NOT pass device_map here to avoid needing accelerate
19
  gen_pipe = pipeline(
20
  task="text2text-generation",
21
  model=model_id
@@ -27,7 +27,7 @@ style_samples = load_style_samples()
27
 
28
  st.set_page_config(page_title="LinkedIn Post Generator", layout="centered")
29
  st.title("🔗 LinkedIn Post Generator (Hugging Face)")
30
- st.write("Generate LinkedIn posts in your style using a compact open model.")
31
 
32
  with st.form("gen_form"):
33
  topic = st.text_input("Post Topic", "Generative AI for Business")
@@ -39,37 +39,70 @@ with st.form("gen_form"):
39
  ["None"] + [f"Sample {i+1}" for i in range(len(style_samples))]
40
  )
41
  custom_style = st.text_area("Or paste your own style sample (optional)")
 
 
 
 
 
 
 
42
  submitted = st.form_submit_button("Generate Post")
43
 
44
- prompt_style = ""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
  if use_sample != "None":
46
  idx = int(use_sample.split()[1]) - 1
47
- prompt_style += f"Sample style: {style_samples[idx]}\n"
48
  if custom_style.strip():
49
- prompt_style += f"User style: {custom_style}\n"
50
-
51
- prompt = (
52
- f"Write a LinkedIn post on '{topic}'.\n"
53
- f"Tone: {tone}. Audience: {audience}. Length: about {length} words.\n"
54
- f"{prompt_style}"
55
- "Write in engaging, natural language, and end with a strong call to action."
56
- )
57
 
58
  if submitted:
59
  if not topic.strip():
60
  st.warning("Please enter a topic.")
61
  else:
 
62
  with st.spinner("Generating..."):
63
  try:
64
- # flan-t5 uses text2text; pipeline returns list of dicts
65
- outputs = pipe(prompt, max_new_tokens=length + 48)
66
- # Support both list and dict return shapes defensively
67
- if isinstance(outputs, list) and len(outputs) and "generated_text" in outputs[0]:
 
 
 
 
 
 
68
  result = outputs[0]["generated_text"].strip()
69
  elif isinstance(outputs, dict) and "generated_text" in outputs:
70
  result = outputs["generated_text"].strip()
71
  else:
72
  result = str(outputs)
 
73
  st.success("Here's your LinkedIn post:")
74
  st.write(result)
75
  st.download_button("Download post as .txt", result, file_name="linkedin_post.txt")
 
13
 
14
  @st.cache_resource(show_spinner=False)
15
  def load_pipeline():
16
+ # CPU-friendly seq2seq model; swap later if you upgrade hardware
17
  model_id = "google/flan-t5-base"
18
+ # Avoid device_map to prevent Accelerate requirement on Spaces CPU
19
  gen_pipe = pipeline(
20
  task="text2text-generation",
21
  model=model_id
 
27
 
28
  st.set_page_config(page_title="LinkedIn Post Generator", layout="centered")
29
  st.title("🔗 LinkedIn Post Generator (Hugging Face)")
30
+ st.write("Generate LinkedIn posts with few-shot style guidance.")
31
 
32
  with st.form("gen_form"):
33
  topic = st.text_input("Post Topic", "Generative AI for Business")
 
39
  ["None"] + [f"Sample {i+1}" for i in range(len(style_samples))]
40
  )
41
  custom_style = st.text_area("Or paste your own style sample (optional)")
42
+
43
+ with st.expander("Advanced generation settings"):
44
+ temperature = st.slider("Temperature", 0.1, 1.2, 0.7, 0.05)
45
+ top_p = st.slider("Top-p (nucleus)", 0.1, 1.0, 0.9, 0.05)
46
+ repetition_penalty = st.slider("Repetition penalty", 1.0, 2.0, 1.15, 0.05)
47
+ no_repeat_ngram_size = st.slider("No-repeat n-gram size", 1, 6, 3, 1)
48
+
49
  submitted = st.form_submit_button("Generate Post")
50
 
51
+ def build_prompt(topic, audience, tone, length, style_example_text):
52
+ # Structured prompt to reduce repetition and produce LinkedIn-ready content
53
+ return (
54
+ "Task: Write a LinkedIn post.\n\n"
55
+ f"Topic: \"{topic}\"\n"
56
+ f"Audience: \"{audience}\"\n"
57
+ f"Tone: \"{tone}\"\n"
58
+ f"Target length: ~{length} words.\n\n"
59
+ "Style requirements:\n"
60
+ "- Start with a 1–2 line hook with a concrete claim or question.\n"
61
+ "- Use 2–3 short paragraphs; keep sentences under 20 words.\n"
62
+ "- Add 3–5 specific insights or steps (use bullet points if helpful).\n"
63
+ "- End with a clear CTA (ask a question or invite comments).\n\n"
64
+ "Constraints:\n"
65
+ "- No repeated sentences or filler phrases.\n"
66
+ "- Avoid clichés like “it's a great example of how we can make a difference in the world.”\n"
67
+ "- Use plain business English.\n\n"
68
+ f"Reference style:\n{style_example_text}\n\n"
69
+ "Output format:\n"
70
+ "HOOK:\n"
71
+ "BODY:\n"
72
+ "TAKEAWAY:\n"
73
+ "CTA:\n"
74
+ )
75
+
76
+ style_example_text = ""
77
  if use_sample != "None":
78
  idx = int(use_sample.split()[1]) - 1
79
+ style_example_text += f"Sample style:\n{style_samples[idx]}\n"
80
  if custom_style.strip():
81
+ style_example_text += f"Custom style:\n{custom_style}\n"
 
 
 
 
 
 
 
82
 
83
  if submitted:
84
  if not topic.strip():
85
  st.warning("Please enter a topic.")
86
  else:
87
+ prompt = build_prompt(topic, audience, tone, length, style_example_text)
88
  with st.spinner("Generating..."):
89
  try:
90
+ outputs = pipe(
91
+ prompt,
92
+ max_new_tokens=length + 120, # give space for sections
93
+ temperature=temperature,
94
+ top_p=top_p,
95
+ repetition_penalty=repetition_penalty,
96
+ no_repeat_ngram_size=no_repeat_ngram_size
97
+ )
98
+ # Pipeline may return list or dict; handle both
99
+ if isinstance(outputs, list) and outputs and "generated_text" in outputs[0]:
100
  result = outputs[0]["generated_text"].strip()
101
  elif isinstance(outputs, dict) and "generated_text" in outputs:
102
  result = outputs["generated_text"].strip()
103
  else:
104
  result = str(outputs)
105
+
106
  st.success("Here's your LinkedIn post:")
107
  st.write(result)
108
  st.download_button("Download post as .txt", result, file_name="linkedin_post.txt")