# SocialPlus / app.py
# (Hugging Face Space by NickAi88; page header captured from the Hub file
# view — commit 6f23309, verified. Converted to comments so the file parses.)
import functools
import os
import random
import uuid
from datetime import datetime

import gradio as gr
import torch
from huggingface_hub import HfApi, login
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# Token setup - Read from environment variables so secrets never live in code.
HF_TOKEN_READ = os.environ.get("HF_TOKEN_READ", "")
HF_TOKEN_WRITE = os.environ.get("HF_TOKEN_WRITE", "")

# Login with appropriate token: the read token authenticates model downloads.
if HF_TOKEN_READ:
    try:
        login(token=HF_TOKEN_READ)
        print("Logged in with read token")
    except Exception as e:
        # Best-effort: the app still works anonymously for public models.
        print(f"Error logging in with read token: {e}")

# Initialize HF API for write operations (if token is available).
# `hf_api` stays None when no write token is configured; save_to_hub checks it.
hf_api = None
if HF_TOKEN_WRITE:
    try:
        hf_api = HfApi(token=HF_TOKEN_WRITE)
        print("HF API initialized with write token")
    except Exception as e:
        print(f"Error initializing HF API: {e}")
# Model loading function with token support.
# NOTE: the original used `@gr.Cache()`, which does not exist in the Gradio
# API and raises AttributeError at import time; functools.lru_cache provides
# the intended per-process memoization.
@functools.lru_cache(maxsize=4)
def load_model(model_name="microsoft/DialoGPT-medium"):
    """Load (and cache) a Hugging Face causal LM and its tokenizer.

    Args:
        model_name: Hub repository id of the model to load.

    Returns:
        A ``(tokenizer, model)`` pair on success, ``(None, None)`` on any
        failure (network error, missing model, auth failure, ...).
    """
    try:
        # Use the read token if available; None means anonymous access.
        token = HF_TOKEN_READ if HF_TOKEN_READ else None
        # Pass only `token=`; supplying the deprecated `use_auth_token`
        # alongside `token` makes recent transformers raise a ValueError.
        tokenizer = AutoTokenizer.from_pretrained(model_name, token=token)
        if tokenizer.pad_token is None:
            # Causal LMs such as DialoGPT/GPT-2 ship without a pad token;
            # reuse EOS so generate() can pad batches.
            tokenizer.pad_token = tokenizer.eos_token
        model = AutoModelForCausalLM.from_pretrained(model_name, token=token)
        return tokenizer, model
    except Exception as e:
        print(f"Error loading model: {e}")
        return None, None
# Save content to Hugging Face Hub (if write token is available)
def save_to_hub(content, filename="social_content.txt", repo_id="your-username/social-media-content"):
    """Upload generated content as a file to a Hugging Face dataset repo.

    Args:
        content: Text to upload.
        filename: Local scratch filename, also used as the path in the repo.
        repo_id: Target dataset repository (``user/name``).

    Returns:
        A human-readable status string (success URL or error message).
    """
    if not hf_api or not HF_TOKEN_WRITE:
        return "Write token not configured. Content not saved to Hub."
    try:
        # Write a temporary local file; utf-8 is required because generated
        # content routinely contains emoji.
        with open(filename, "w", encoding="utf-8") as f:
            f.write(content)
        # Upload to the Hub as a dataset file.
        hf_api.upload_file(
            path_or_fileobj=filename,
            path_in_repo=filename,
            repo_id=repo_id,
            repo_type="dataset",
            commit_message=f"Add social media content - {datetime.now().strftime('%Y-%m-%d %H:%M')}"
        )
        return f"Content saved to Hub: https://huggingface.co/datasets/{repo_id}"
    except Exception as e:
        return f"Error saving to Hub: {e}"
    finally:
        # Remove the scratch file even when the upload fails (the original
        # only cleaned up on success, leaking the file on error).
        if os.path.exists(filename):
            os.remove(filename)
# Fallback content generation
def fallback_generate_content(prompt, platform):
    """Return a canned, template-based post when AI generation is unavailable.

    Picks a random template for the given platform; unknown platforms fall
    back to the cross-platform ("Both") templates.
    """
    instagram_posts = [
        f"✨ {prompt.title()} ✨\n\nReady to transform your approach? Here's what you need to know:\n\n🔥 Key insight that changes everything\n💡 Pro tip that most people miss\n⚡ Action step you can take today\n\nWhat's your experience? Share in the comments! 👇",
        f"🌟 Behind the scenes of {prompt} 🌟\n\nSharing our process and what makes it special!\n\nEver wondered about {prompt}? Let us know your questions below! ⬇️",
    ]
    tiktok_posts = [
        f"POV: You finally understand {prompt} 🤯\n\n*shows before and after*\n\nThe secret? [key insight]\n\nWho else needed to hear this? 💪",
        f"Wait until you try this {prompt} hack! 👀\n\nGame changer alert! 🚨\n\nSave this for later! ⬇️",
    ]
    cross_platform_posts = [
        f"Cross-platform content for {prompt} 📱\n\nCreating value across different channels!\n\nWhat platform do you prefer? Let me know! 🗣️",
    ]
    catalog = {
        "Instagram": instagram_posts,
        "TikTok": tiktok_posts,
        "Both": cross_platform_posts,
    }
    return random.choice(catalog.get(platform, cross_platform_posts))
# AI-powered content generation
def generate_content(prompt, platform, max_length=150):
    """Generate platform-tailored copy with a local HF model.

    Falls back to the canned templates whenever model loading or generation
    fails, or when the model produces no new text.
    """
    try:
        # Instagram gets the conversational model; everything else uses GPT-2.
        model_name = "microsoft/DialoGPT-medium" if platform == "Instagram" else "gpt2"
        tokenizer, model = load_model(model_name)
        if tokenizer is None or model is None:
            return fallback_generate_content(prompt, platform)

        # Steer the model with an instruction-style prefix.
        formatted_prompt = f"Create engaging {platform} content about: {prompt}"
        input_ids = tokenizer.encode(formatted_prompt, return_tensors="pt")
        mask = torch.ones(input_ids.shape, dtype=torch.long)
        sequences = model.generate(
            input_ids,
            max_length=max_length,
            num_return_sequences=1,
            temperature=0.8,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
            attention_mask=mask,
        )
        text = tokenizer.decode(sequences[0], skip_special_tokens=True)
        # Strip the echoed prompt so only newly generated text remains.
        if text.startswith(formatted_prompt):
            text = text[len(formatted_prompt):].strip()
        return text if text else fallback_generate_content(prompt, platform)
    except Exception as e:
        print(f"Error in generate_content: {e}")
        return fallback_generate_content(prompt, platform)
# Improved hashtag generation
def generate_hashtags(topic, platform):
    """Generate up to five hashtags for *topic* with GPT-2.

    Falls back to :func:`fallback_hashtags` on any failure or empty output.
    """
    try:
        # Use the read token if available; None means anonymous access.
        token = HF_TOKEN_READ if HF_TOKEN_READ else None
        # Pass only `token=`; combining it with the deprecated
        # `use_auth_token` raises a ValueError in recent transformers.
        generator = pipeline(
            'text-generation',
            model='gpt2',
            token=token
        )
        prompt = f"Generate 5 relevant hashtags for {topic} on {platform}:"
        result = generator(prompt, max_length=50, num_return_sequences=1)
        hashtags = result[0]['generated_text'].replace(prompt, '').strip()
        # Keep the first five whitespace-separated tokens, drop internal
        # spaces, and prefix each with '#'.
        hashtag_list = [tag.strip().replace(' ', '') for tag in hashtags.split()[:5]]
        hashtags = ' '.join(['#' + tag for tag in hashtag_list if tag])
        return hashtags if hashtags else fallback_hashtags(topic, platform)
    except Exception as e:
        print(f"Error generating hashtags: {e}")
        return fallback_hashtags(topic, platform)
def fallback_hashtags(topic, platform):
    """Build hashtags from the topic words plus platform-specific staples.

    ``"Fitness, Yoga"`` becomes ``"fitness #yoga"``: lowercase, spaces
    squeezed out, commas turned into hashtag separators.
    """
    base_hashtags = topic.lower().replace(' ', '').replace(',', ' #')
    platform_tags = {
        "Instagram": "#instagood #photooftheday #instadaily #motivation #inspiration",
        "TikTok": "#fyp #foryou #viral #trending #tiktok",
    }.get(platform, "#content #socialmedia #digital")
    return f"#{base_hashtags} {platform_tags}"
# Content improvement function
def improve_content(content, platform, tone):
    """Rewrite *content* in the requested tone using FLAN-T5.

    Returns the improved text, or the original ``content`` unchanged when
    the model cannot be loaded or generation fails.
    """
    try:
        # Use the read token if available; None means anonymous access.
        token = HF_TOKEN_READ if HF_TOKEN_READ else None
        # Pass only `token=`; combining it with the deprecated
        # `use_auth_token` raises a ValueError in recent transformers.
        improver = pipeline(
            'text2text-generation',
            model='google/flan-t5-base',
            token=token
        )
        prompt = f"Improve this {platform} content to make it more {tone}: {content}"
        result = improver(prompt, max_length=200)
        return result[0]['generated_text']
    except Exception as e:
        print(f"Error improving content: {e}")
        return content
# Main content generation function
def generate_social_media_content(platform, content_type, topic, target_audience, tone, length, brand_voice, key_message, call_to_action, model_choice):
    """Produce a markdown report with generated content, hashtags and stats.

    Note: ``length``, ``brand_voice`` and ``key_message`` are accepted for
    interface compatibility with the UI but are not used in the prompt.
    """
    # Build the instruction for the content generator, then the hashtags.
    prompt = f"Create a {content_type.lower()} for {platform} about {topic} targeting {target_audience} in a {tone.lower()} tone"
    generated_content = generate_content(prompt, platform)
    hashtags = generate_hashtags(topic, platform)

    # Basic length statistics for the feedback line.
    char_count = len(generated_content)
    word_count = len(generated_content.split())

    # Platform-specific length feedback ("Both" intentionally gets none).
    platform_feedback = ""
    if platform == "Instagram":
        platform_feedback = (
            "⚠️ Caption may be too long for Instagram"
            if char_count > 2200
            else "✅ Good length for Instagram"
        )
    elif platform == "TikTok":
        platform_feedback = (
            "⚠️ Consider shorter text for TikTok"
            if char_count > 150
            else "✅ Perfect for TikTok"
        )

    return f"""
# {content_type} for {platform}
## Content:
{generated_content}
## Hashtags:
{hashtags}
## Stats:
- Characters: {char_count}
- Words: {word_count}
- {platform_feedback}
## Details:
- Target: {target_audience}
- Tone: {tone}
- CTA: {call_to_action}
- AI Model: {model_choice}
- Created: {datetime.now().strftime('%Y-%m-%d %H:%M')}
"""
def get_trending_topics():
    """Pick one suggestion at random from a fixed pool of evergreen topics."""
    suggestions = (
        "Productivity hacks", "Morning routine", "Self care Sunday",
        "Workspace setup", "Healthy recipes", "Weekend vibes",
        "Goal setting", "Mindfulness tips",
    )
    return random.choice(suggestions)
def load_template(platform, template_name):
    """Render a fill-in-the-blank content template plus a worked example.

    Returns markdown with the template in a fenced code block, or the
    string ``"Template not found"`` for unknown platform/template pairs.
    """
    templates = {
        "Instagram": {
            "Product Showcase": {
                "template": "🌟 Introducing [PRODUCT NAME] 🌟\n\n[KEY BENEFIT] that [SOLVES PROBLEM]\n\n✨ Perfect for [TARGET AUDIENCE]\n💫 [UNIQUE FEATURE]\n🎯 [CALL TO ACTION]\n\n[HASHTAGS]",
                "example": "🌟 Introducing our New Workout Planner 🌟\n\nStay organized and motivated with our 90-day fitness tracker\n\n✨ Perfect for busy professionals\n💫 Includes meal planning section\n🎯 Link in bio to get yours!\n\n#fitness #workout #planning"
            },
            "Behind the Scenes": {
                "template": "Behind the scenes at [LOCATION/EVENT] 👀\n\n[WHAT'S HAPPENING]\n\n[INTERESTING DETAIL]\n\nWhat would you like to see more of?\n\n[HASHTAGS]",
                "example": "Behind the scenes at our studio today 👀\n\nCreating content for our new product launch\n\nOur team worked 12 hours straight but the energy was amazing!\n\nWhat would you like to see more of?\n\n#behindthescenes #teamwork #creative"
            }
        },
        "TikTok": {
            "Tutorial": {
                "template": "How to [SKILL/TASK] in [TIME] ⏰\n\nStep 1: [ACTION]\nStep 2: [ACTION] \nStep 3: [ACTION]\n\nTry it and let me know! 💪\n\n[HASHTAGS]",
                "example": "How to meal prep in 30 minutes ⏰\n\nStep 1: Choose 3 proteins\nStep 2: Prep all veggies first\nStep 3: Cook everything at once\n\nTry it and let me know! 💪\n\n#mealprep #cooking #lifehacks"
            },
            "Trending Challenge": {
                "template": "[TREND] but make it [YOUR NICHE] ✨\n\n[YOUR TWIST ON THE TREND]\n\nWho else does this? 😂\n\n[HASHTAGS]",
                "example": "Tell me you're a plant parent without telling me ✨\n\nMe: Has 47 plants and knows each one's watering schedule by heart\n\nWho else does this? 😂\n\n#plantparent #planttok #relatable"
            }
        }
    }
    entry = templates.get(platform, {}).get(template_name)
    if entry is None:
        return "Template not found"
    return (
        f"### {template_name} Template for {platform}\n\n"
        f"```\n{entry['template']}\n```\n\n"
        f"#### Example:\n{entry['example']}"
    )
# ---------------------------------------------------------------------------
# Gradio interface
# ---------------------------------------------------------------------------
with gr.Blocks(
    title="Social Media Content Creator",
    theme=gr.themes.Soft(),
    # Custom CSS: page width cap, header banner, output card, token badges.
    css="""
.gradio-container {
    max-width: 1200px !important;
}
.output-markdown {
    padding: 20px;
    border-radius: 10px;
    background: #f8f9fa;
    border-left: 4px solid #667eea;
}
.header {
    text-align: center;
    padding: 20px;
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    color: white;
    border-radius: 10px;
    margin-bottom: 20px;
}
.token-status {
    padding: 10px;
    border-radius: 5px;
    margin-bottom: 10px;
}
.token-ok {
    background: #d4edda;
    color: #155724;
    border: 1px solid #c3e6cb;
}
.token-warning {
    background: #fff3cd;
    color: #856404;
    border: 1px solid #ffeeba;
}
"""
) as demo:
    with gr.Column():
        # Page header banner (styled by the .header CSS class above).
        gr.Markdown("""
<div class="header">
<h1>📱 Social Media Content Creator</h1>
<p>Create engaging content for Instagram and TikTok with AI assistance</p>
</div>
""")

    # Token status indicator: shows whether read/write tokens are configured.
    token_status = "🔑 Read Token: " + ("✅ Available" if HF_TOKEN_READ else "❌ Not configured")
    token_status += " | Write Token: " + ("✅ Available" if HF_TOKEN_WRITE else "❌ Not configured")
    token_status_class = "token-ok" if HF_TOKEN_READ else "token-warning"
    gr.Markdown(f"""<div class="token-status {token_status_class}">{token_status}</div>""")

    with gr.Tab("Create Content"):
        with gr.Row():
            # Left column: all user inputs for content generation.
            with gr.Column(scale=2):
                with gr.Row():
                    platform = gr.Dropdown(
                        choices=["Instagram", "TikTok", "Both"],
                        label="Platform",
                        value="Instagram"
                    )
                    model_choice = gr.Dropdown(
                        choices=["Auto-Select", "DialoGPT (Instagram)", "GPT-2 (TikTok)", "FLAN-T5"],
                        label="AI Model",
                        value="Auto-Select",
                        interactive=True
                    )
                content_type = gr.Dropdown(
                    choices=[
                        "Post Caption", "Story Text", "Reel Script",
                        "TikTok Video Script", "Hashtag Strategy",
                        "Bio Update", "Product Showcase"
                    ],
                    label="Content Type",
                    value="Post Caption"
                )
                topic = gr.Textbox(
                    label="Topic/Subject",
                    placeholder="What's your content about?"
                )
                target_audience = gr.Textbox(
                    label="Target Audience",
                    placeholder="Young professionals, fitness enthusiasts, etc."
                )
                with gr.Row():
                    tone = gr.Dropdown(
                        choices=["Casual", "Professional", "Fun", "Inspirational", "Educational", "Trendy"],
                        label="Tone",
                        value="Casual"
                    )
                    post_length = gr.Dropdown(
                        choices=["Short", "Medium", "Long"],
                        label="Length",
                        value="Medium"
                    )
                gr.Markdown("### Content Details")
                brand_voice = gr.Textbox(
                    label="Brand Voice/Style",
                    placeholder="Describe your brand personality..."
                )
                key_message = gr.Textbox(
                    label="Key Message",
                    placeholder="Main point you want to communicate"
                )
                call_to_action = gr.Textbox(
                    label="Call to Action",
                    placeholder="What should viewers do?"
                )
                generate_btn = gr.Button("Generate Content", variant="primary")

            # Right column: static guidance, quick actions, idea shortcuts.
            with gr.Column(scale=1):
                gr.Markdown("### Content Guidelines")
                # Only the tips panel matching the selected platform is
                # visible; update_tips() below toggles visibility.
                instagram_tips = gr.Markdown("""
**Instagram Best Practices:**
- Captions: 125-150 characters for feed
- Stories: Short, engaging text
- Reels: Hook in first 3 seconds
- Use 5-10 relevant hashtags
- Include call-to-action
""")
                tiktok_tips = gr.Markdown("""
**TikTok Best Practices:**
- Hook viewers in first 2-3 seconds
- Keep text concise and punchy
- Use trending sounds/effects
- 3-5 hashtags max
- Include trending challenges
""", visible=False)
                both_tips = gr.Markdown("""
**Multi-Platform Tips:**
- Adapt content for each platform
- Different hashtag strategies
- Vary content length
- Platform-specific features
""", visible=False)
                gr.Markdown("### Quick Actions")
                with gr.Row():
                    trending_btn = gr.Button("Trending Topics")
                    hashtags_btn = gr.Button("Hashtags Only")
                gr.Markdown("### Content Ideas")
                content_ideas = [
                    "Behind the scenes", "Day in the life", "Tutorial/How-to",
                    "Product showcase", "User-generated content", "Trending challenge",
                    "Q&A session", "Before/After"
                ]
                # One-click buttons that copy an idea into the topic box.
                # `x=idea` binds the loop variable at definition time,
                # avoiding the late-binding-closure pitfall.
                with gr.Row():
                    for idea in content_ideas[:4]:
                        gr.Button(idea, size="sm").click(
                            lambda x=idea: x,
                            outputs=topic
                        )
                with gr.Row():
                    for idea in content_ideas[4:]:
                        gr.Button(idea, size="sm").click(
                            lambda x=idea: x,
                            outputs=topic
                        )

        # Output section: markdown report produced by the generators.
        output = gr.Markdown(elem_classes="output-markdown")

        # Improvement UI: rewrite the generated content in a chosen tone.
        with gr.Row():
            improve_btn = gr.Button("Improve with AI", variant="secondary")
            enhance_tone = gr.Dropdown(
                choices=["Casual", "Professional", "Fun", "Inspirational", "Educational", "Trendy"],
                label="Enhance Tone",
                value="Casual"
            )

        # Save to Hub button (only shown if a write token is available).
        if HF_TOKEN_WRITE:
            save_btn = gr.Button("💾 Save to Hub", variant="secondary")

        # --- Event handlers -------------------------------------------------
        def update_tips(selected_platform):
            # Toggle visibility so exactly one tips panel is shown.
            if selected_platform == "Instagram":
                return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)
            elif selected_platform == "TikTok":
                return gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)
            else:
                return gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)

        platform.change(
            update_tips,
            inputs=platform,
            outputs=[instagram_tips, tiktok_tips, both_tips]
        )

        def set_trending_topic():
            # Thin wrapper so the click handler has a zero-arg callable.
            return get_trending_topics()

        trending_btn.click(
            set_trending_topic,
            outputs=topic
        )

        def generate_hashtags_only(topic, platform):
            # Parameters intentionally shadow the component names: Gradio
            # passes the component *values* here.
            if topic:
                return f"**Hashtags for {topic}:**\n\n{generate_hashtags(topic, platform)}"
            return "Please enter a topic first"

        hashtags_btn.click(
            generate_hashtags_only,
            inputs=[topic, platform],
            outputs=output
        )

        generate_btn.click(
            generate_social_media_content,
            inputs=[
                platform, content_type, topic, target_audience,
                tone, post_length, brand_voice, key_message, call_to_action, model_choice
            ],
            outputs=output
        )

        def improve_existing_content(content, platform, tone):
            # Rewrites the "## Content:" section of the report in place.
            if content and "## Content:" in content:
                # Extract the actual content part
                content_part = content.split("## Content:")[1].split("## Hashtags:")[0].strip()
                improved = improve_content(content_part, platform, tone)
                # Reconstruct the output
                # NOTE(review): assigning parts[1] wholesale replaces
                # everything after "## Content:" — the hashtags/stats/details
                # sections are dropped from the result. Looks like a bug;
                # confirm intent before relying on this handler.
                parts = content.split("## Content:")
                parts[1] = f"\n{improved}\n\n"
                return "## Content:".join(parts)
            return content

        improve_btn.click(
            improve_existing_content,
            inputs=[output, platform, enhance_tone],
            outputs=output
        )

        # Save to Hub handler (mirrors the conditional button above).
        if HF_TOKEN_WRITE:
            def save_content_to_hub(content):
                if not content:
                    return "No content to save"
                return save_to_hub(content)

            save_btn.click(
                save_content_to_hub,
                inputs=output,
                outputs=gr.Markdown()
            )

    with gr.Tab("Templates"):
        gr.Markdown("### Content Templates")
        with gr.Row():
            template_platform = gr.Dropdown(
                choices=["Instagram", "TikTok"],
                label="Platform",
                value="Instagram"
            )
            template_name = gr.Dropdown(
                choices=["Product Showcase", "Behind the Scenes", "Tutorial", "Trending Challenge"],
                label="Template",
                value="Product Showcase"
            )
        template_output = gr.Markdown(elem_classes="output-markdown")
        load_template_btn = gr.Button("Load Template", variant="primary")
        load_template_btn.click(
            load_template,
            inputs=[template_platform, template_name],
            outputs=template_output
        )

    with gr.Tab("Analytics"):
        gr.Markdown("### Content Performance Analytics")
        # Static placeholder figures — no real analytics integration yet.
        with gr.Row():
            with gr.Column():
                gr.Markdown("#### Instagram Performance")
                gr.Markdown("- Posts Created: 12\n- Avg Engagement Rate: 4.2%\n- Best Performing Post: Behind the scenes")
            with gr.Column():
                gr.Markdown("#### TikTok Performance")
                gr.Markdown("- Videos Created: 8\n- Avg View Rate: 12.8%\n- Best Performing Video: Tutorial content")
        gr.Markdown("---")
        gr.Markdown("Connect your social media accounts to see real analytics data")
# Start the Gradio server when the script is run directly.
if __name__ == "__main__":
    demo.launch()