Alpha108 committed on
Commit
a4adc35
·
verified ·
1 Parent(s): 4e40f65

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +87 -0
app.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import json
3
+ from transformers import pipeline
4
+ from huggingface_hub import login
5
+ import os
6
# Hugging Face authentication: the token comes from the deployment's secret
# settings; with no token configured the app runs unauthenticated, which is
# fine for public models.
HUGGINGFACE_TOKEN = os.environ.get("HUGGINGFACE_TOKEN")
if HUGGINGFACE_TOKEN not in (None, ""):
    login(token=HUGGINGFACE_TOKEN)
# Load example style-sample posts from a local JSON file.  The file is
# optional: a missing file or invalid JSON falls back to an empty sample
# list instead of crashing the app at startup.
SAMPLE_FILE = "style_samples.json"
try:
    # EAFP: just try to read it; explicit UTF-8 avoids platform-default decoding.
    with open(SAMPLE_FILE, "r", encoding="utf-8") as f:
        style_samples = json.load(f)
except (FileNotFoundError, json.JSONDecodeError):
    style_samples = []
# The rest of the app indexes samples by position ("Sample 1", "Sample 2", ...),
# so anything other than a JSON array is unusable — discard it.
if not isinstance(style_samples, list):
    style_samples = []
# Configure the page first: Streamlit requires st.set_page_config() to be the
# first Streamlit command executed in the script, so it must run before the
# cached model load below, which goes through Streamlit's caching machinery.
st.set_page_config(page_title="LinkedIn Post Generator", layout="centered")


# Cache the pipeline so the model is loaded once and reused across reruns.
@st.cache_resource(show_spinner=False)
def load_pipeline():
    """Build and return the text-generation pipeline.

    Returns:
        A transformers text2text-generation pipeline.  Swap ``model_id`` for
        any other open, compatible model (e.g. "meta-llama/Llama-2-7b-chat-hf").
    """
    model_id = "google/flan-t5-base"  # Replace with your favorite
    return pipeline("text2text-generation", model=model_id)


pipe = load_pipeline()

# UI header
st.title("🔗 LinkedIn Post Generator (Hugging Face)")
st.write("Generate high-quality LinkedIn posts using GenAI. Provide a topic, select style, and go!")
# Input form.  The widget variable names are read by the prompt builder
# further down the script, so they are kept unchanged.
with st.form("post_form"):
    topic = st.text_input("Post topic", "Generative AI for Business")
    tone_choices = ["Professional", "Friendly", "Inspirational", "Technical", "Concise"]
    tone = st.selectbox("Tone", tone_choices)
    audience = st.text_input("Audience", "Startup founders")
    length = st.slider("Post length (words)", 50, 500, 150, 10)
    # One selectable label per loaded style sample, plus an opt-out entry.
    sample_labels = [f"Sample {n}" for n in range(1, len(style_samples) + 1)]
    style_option = st.selectbox("Choose style sample", ["None"] + sample_labels)
    custom_style = st.text_area("Or paste your own style example", "")
    submit = st.form_submit_button("Generate LinkedIn Post")
# Collect the optional few-shot style snippets, then join them once instead
# of growing a string incrementally.
example_parts = []
if style_option != "None":
    # Labels look like "Sample 3"; recover the zero-based index into style_samples.
    sample_index = int(style_option.split()[1]) - 1
    example_parts.append(f"Example style post: {style_samples[sample_index]}\n")
if custom_style.strip():
    example_parts.append(f"User style sample: {custom_style}\n")
prompt_examples = "".join(example_parts)

# Compose the prompt
prompt = (
    f"Write a LinkedIn post on the topic '{topic}'.\n"
    f"Tone: {tone}.\nAudience: {audience}.\nTarget length: {length} words.\n{prompt_examples}"
    "Instructions: The post should engage the specified audience, use the provided style, and end with a call to action."
)
# Results area: only renders after the form has been submitted.
if submit:
    if not topic.strip():
        st.warning("Please enter a topic for your post.")
    else:
        with st.spinner("Generating post..."):
            try:
                # Give the model some headroom beyond the requested word count.
                outputs = pipe(prompt, max_new_tokens=length + 50)
                post_text = outputs[0]["generated_text"].strip()
                st.success("Here's your LinkedIn post:")
                st.write(post_text)
            except Exception as err:
                st.error(f"Error generating post: {err}")
# Style-sample management: lets the user persist their own example posts for
# future sessions.
st.markdown("---")
st.write("Upload or edit your own style samples below for future runs.")

uploaded_file = st.file_uploader("Upload style_samples.json (sample LinkedIn posts)", type=["json"])
if uploaded_file:
    try:
        uploaded_samples = json.load(uploaded_file)
        # The app indexes samples by position ("Sample 1", ...), so only a
        # JSON array is a valid upload; reject objects/scalars with a clear error.
        if not isinstance(uploaded_samples, list):
            raise ValueError("JSON file must contain a list of sample posts")
        # UTF-8 + ensure_ascii=False keeps emoji/non-ASCII posts readable on disk.
        with open(SAMPLE_FILE, "w", encoding="utf-8") as fout:
            json.dump(uploaded_samples, fout, ensure_ascii=False)
        st.success(f"Sample file uploaded and saved ({len(uploaded_samples)} samples). Refresh the app to use them.")
    except Exception as e:
        st.error(f"Failed to read uploaded file: {e}")