NickAi88 commited on
Commit
6f23309
·
verified ·
1 Parent(s): 998a871

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +297 -63
app.py CHANGED
@@ -2,62 +2,225 @@ import gradio as gr
2
  import uuid
3
  from datetime import datetime
4
  import random
 
 
 
 
5
 
6
- # Mock function to simulate AI generation
7
- def generate_content(prompt, max_length=150):
8
- """Simulate AI content generation"""
9
- topics = prompt.lower().split()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  responses = {
11
- "instagram": [
12
  f"✨ {prompt.title()} ✨\n\nReady to transform your approach? Here's what you need to know:\n\n🔥 Key insight that changes everything\n💡 Pro tip that most people miss\n⚡ Action step you can take today\n\nWhat's your experience? Share in the comments! 👇",
13
  f"🌟 Behind the scenes of {prompt} 🌟\n\nSharing our process and what makes it special!\n\nEver wondered about {prompt}? Let us know your questions below! ⬇️",
14
- f"📸 Capturing the essence of {prompt}\n\nSometimes the simplest moments become the most memorable. \n\nTag someone who needs to see this! 🏷️"
15
  ],
16
- "tiktok": [
17
  f"POV: You finally understand {prompt} 🤯\n\n*shows before and after*\n\nThe secret? [key insight]\n\nWho else needed to hear this? 💪",
18
  f"Wait until you try this {prompt} hack! 👀\n\nGame changer alert! 🚨\n\nSave this for later! ⬇️",
19
- f"Duet this if you agree! 👇\n\n{prompt} is seriously underrated. \n\nWhat's your take? 🤔"
20
  ],
21
- "both": [
22
  f"Cross-platform content for {prompt} 📱\n\nCreating value across different channels!\n\nWhat platform do you prefer? Let me know! 🗣️",
23
- f"Multi-platform strategy for {prompt} 🎯\n\nDifferent content for different audiences!\n\nWhich platform works best for you? 📊"
24
  ]
25
  }
26
 
27
- # Simple logic to select response based on platform hint in prompt
28
- if "instagram" in prompt.lower():
29
- return random.choice(responses["instagram"])
30
- elif "tiktok" in prompt.lower():
31
- return random.choice(responses["tiktok"])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
32
  else:
33
- return random.choice(responses["both"])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
34
 
35
  # Main content generation function
36
- def generate_social_media_content(platform, content_type, topic, target_audience, tone, length, brand_voice, key_message, call_to_action):
37
  """Generate social media content based on inputs"""
38
 
39
  # Generate main content
40
  content_prompt = f"Create a {content_type.lower()} for {platform} about {topic} targeting {target_audience} in a {tone.lower()} tone"
41
- generated_content = generate_content(content_prompt)
42
 
43
  # Generate hashtags
44
  hashtags = generate_hashtags(topic, platform)
45
 
46
- # Create content object
47
- new_content = {
48
- 'id': str(uuid.uuid4()),
49
- 'platform': platform,
50
- 'type': content_type,
51
- 'topic': topic,
52
- 'content': generated_content,
53
- 'hashtags': hashtags,
54
- 'audience': target_audience,
55
- 'tone': tone,
56
- 'cta': call_to_action,
57
- 'created': datetime.now().strftime('%Y-%m-%d %H:%M'),
58
- 'status': 'Draft'
59
- }
60
-
61
  # Calculate stats
62
  content_text = generated_content
63
  char_count = len(content_text)
@@ -95,35 +258,18 @@ def generate_social_media_content(platform, content_type, topic, target_audience
95
  - Target: {target_audience}
96
  - Tone: {tone}
97
  - CTA: {call_to_action}
98
- - Created: {new_content['created']}
 
99
  """
100
 
101
  return output
102
 
103
- def generate_hashtags(topic, platform):
104
- """Generate relevant hashtags"""
105
- base_hashtags = topic.lower().replace(' ', '').replace(',', ' #')
106
-
107
- if platform == "Instagram":
108
- platform_tags = "#instagood #photooftheday #instadaily #motivation #inspiration"
109
- elif platform == "TikTok":
110
- platform_tags = "#fyp #foryou #viral #trending #tiktok"
111
- else:
112
- platform_tags = "#content #socialmedia #digital"
113
-
114
- return f"#{base_hashtags} {platform_tags}"
115
-
116
  def get_trending_topics():
117
  """Get trending topic suggestions"""
118
  trending = [
119
- "Productivity hacks",
120
- "Morning routine",
121
- "Self care Sunday",
122
- "Workspace setup",
123
- "Healthy recipes",
124
- "Weekend vibes",
125
- "Goal setting",
126
- "Mindfulness tips"
127
  ]
128
  return random.choice(trending)
129
 
@@ -168,20 +314,64 @@ with gr.Blocks(
168
  padding: 20px;
169
  border-radius: 10px;
170
  background: #f8f9fa;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
171
  }
172
  """
173
  ) as demo:
174
- gr.Markdown("# 📱 Social Media Content Creator")
175
- gr.Markdown("Create engaging content for Instagram and TikTok with AI assistance")
 
 
 
 
 
 
 
 
 
 
 
 
176
 
177
  with gr.Tab("Create Content"):
178
  with gr.Row():
179
  with gr.Column(scale=2):
180
- platform = gr.Dropdown(
181
- choices=["Instagram", "TikTok", "Both"],
182
- label="Platform",
183
- value="Instagram"
184
- )
 
 
 
 
 
 
 
 
185
 
186
  content_type = gr.Dropdown(
187
  choices=[
@@ -294,6 +484,19 @@ with gr.Blocks(
294
  # Output section
295
  output = gr.Markdown(elem_classes="output-markdown")
296
 
 
 
 
 
 
 
 
 
 
 
 
 
 
297
  # Event handlers
298
  def update_tips(selected_platform):
299
  if selected_platform == "Instagram":
@@ -332,10 +535,41 @@ with gr.Blocks(
332
  generate_social_media_content,
333
  inputs=[
334
  platform, content_type, topic, target_audience,
335
- tone, post_length, brand_voice, key_message, call_to_action
336
  ],
337
  outputs=output
338
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
339
 
340
  with gr.Tab("Templates"):
341
  gr.Markdown("### Content Templates")
 
2
  import uuid
3
  from datetime import datetime
4
  import random
5
+ import os
6
+ from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
7
+ import torch
8
+ from huggingface_hub import login, HfApi
9
 
10
# Token setup - Read from environment variables.
# NOTE(review): read and write tokens are kept separate so the Space can run
# read-only when no write token is configured — confirm both secrets are set
# in the Space settings.
HF_TOKEN_READ = os.environ.get("HF_TOKEN_READ", "")
HF_TOKEN_WRITE = os.environ.get("HF_TOKEN_WRITE", "")

# Login with the read token (used for gated/private model downloads).
if HF_TOKEN_READ:
    try:
        login(token=HF_TOKEN_READ)
        print("Logged in with read token")
    except Exception as e:
        # Login failure is non-fatal: public models still load anonymously.
        print(f"Error logging in with read token: {e}")

# Initialize HF API for write operations (if token is available).
# `hf_api` stays None when unconfigured; save_to_hub() checks for this.
hf_api = None
if HF_TOKEN_WRITE:
    try:
        hf_api = HfApi(token=HF_TOKEN_WRITE)
        print("HF API initialized with write token")
    except Exception as e:
        print(f"Error initializing HF API: {e}")
30
+
31
# Model loading function with token support.
# NOTE: the original used `@gr.Cache()`, which is not part of the Gradio API
# and raises AttributeError at import time; caching is done with a plain
# module-level dict instead.
_MODEL_CACHE = {}

def load_model(model_name="microsoft/DialoGPT-medium"):
    """Load (and cache) a Hugging Face tokenizer/model pair for generation.

    Args:
        model_name: Hub model id to load.

    Returns:
        (tokenizer, model) on success, or (None, None) if loading fails —
        callers fall back to template-based content in that case.
    """
    if model_name in _MODEL_CACHE:
        return _MODEL_CACHE[model_name]
    try:
        # Use the read token if available. Only `token` is forwarded:
        # passing the deprecated `use_auth_token` alongside `token` raises
        # a ValueError in recent transformers releases.
        token = HF_TOKEN_READ if HF_TOKEN_READ else None

        tokenizer = AutoTokenizer.from_pretrained(model_name, token=token)

        # Generation needs a pad token; GPT-style models ship without one,
        # so fall back to EOS.
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token

        model = AutoModelForCausalLM.from_pretrained(model_name, token=token)

        _MODEL_CACHE[model_name] = (tokenizer, model)
        return tokenizer, model
    except Exception as e:
        print(f"Error loading model: {e}")
        return None, None
58
+
59
# Save content to Hugging Face Hub (if write token is available)
def save_to_hub(content, filename="social_content.txt", repo_id="your-username/social-media-content"):
    """Save generated content to a Hugging Face Hub dataset repo.

    Args:
        content: text to upload.
        filename: name used both for the local scratch file and in the repo.
        repo_id: target dataset repository (owner/name).

    Returns:
        A human-readable status message string.
    """
    if not hf_api or not HF_TOKEN_WRITE:
        return "Write token not configured. Content not saved to Hub."

    try:
        # Write explicitly as UTF-8: the generated content contains emoji,
        # which the platform-default encoding (e.g. cp1252) cannot encode.
        with open(filename, "w", encoding="utf-8") as f:
            f.write(content)

        try:
            # Upload to Hub
            hf_api.upload_file(
                path_or_fileobj=filename,
                path_in_repo=filename,
                repo_id=repo_id,
                repo_type="dataset",
                commit_message=f"Add social media content - {datetime.now().strftime('%Y-%m-%d %H:%M')}"
            )
        finally:
            # Remove the scratch file even when the upload fails (the
            # original leaked it on error).
            if os.path.exists(filename):
                os.remove(filename)

        return f"Content saved to Hub: https://huggingface.co/datasets/{repo_id}"
    except Exception as e:
        return f"Error saving to Hub: {e}"
85
+
86
# Fallback content generation
def fallback_generate_content(prompt, platform):
    """Return a canned, template-based post when AI generation is unavailable.

    Picks a random template for the given platform; unknown platforms fall
    back to the cross-platform templates.
    """
    instagram_posts = [
        f"✨ {prompt.title()} ✨\n\nReady to transform your approach? Here's what you need to know:\n\n🔥 Key insight that changes everything\n💡 Pro tip that most people miss\n⚡ Action step you can take today\n\nWhat's your experience? Share in the comments! 👇",
        f"🌟 Behind the scenes of {prompt} 🌟\n\nSharing our process and what makes it special!\n\nEver wondered about {prompt}? Let us know your questions below! ⬇️",
    ]
    tiktok_posts = [
        f"POV: You finally understand {prompt} 🤯\n\n*shows before and after*\n\nThe secret? [key insight]\n\nWho else needed to hear this? 💪",
        f"Wait until you try this {prompt} hack! 👀\n\nGame changer alert! 🚨\n\nSave this for later! ⬇️",
    ]
    cross_platform_posts = [
        f"Cross-platform content for {prompt} 📱\n\nCreating value across different channels!\n\nWhat platform do you prefer? Let me know! 🗣️",
    ]

    by_platform = {
        "Instagram": instagram_posts,
        "TikTok": tiktok_posts,
        "Both": cross_platform_posts,
    }
    candidates = by_platform.get(platform, cross_platform_posts)
    return random.choice(candidates)
104
+
105
# AI-powered content generation
def generate_content(prompt, platform, max_length=150):
    """Generate content using Hugging Face models.

    Falls back to template-based content whenever the model cannot be
    loaded, generation fails, or the model produces an empty continuation.

    Args:
        prompt: topic/description to generate content about.
        platform: "Instagram", "TikTok" or "Both" — selects the model.
        max_length: total token budget passed to `model.generate`.

    Returns:
        Generated text, or a fallback template string.
    """
    try:
        # Pick a model per platform: DialoGPT is conversational (suits
        # Instagram captions), GPT-2 is the general-purpose default.
        if platform == "Instagram":
            model_name = "microsoft/DialoGPT-medium"
        else:
            model_name = "gpt2"

        tokenizer, model = load_model(model_name)

        if tokenizer is None or model is None:
            return fallback_generate_content(prompt, platform)

        # Format prompt for better results
        formatted_prompt = f"Create engaging {platform} content about: {prompt}"

        inputs = tokenizer.encode(formatted_prompt, return_tensors="pt")
        # No padding is applied, so the attention mask is all ones.
        attention_mask = torch.ones(inputs.shape, dtype=torch.long)

        # Inference only: disable autograd to avoid wasting memory on
        # gradient bookkeeping (the original generated with autograd on).
        with torch.no_grad():
            outputs = model.generate(
                inputs,
                max_length=max_length,
                num_return_sequences=1,
                temperature=0.8,
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id,
                attention_mask=attention_mask
            )

        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Strip the echoed prompt so only the continuation remains.
        if generated_text.startswith(formatted_prompt):
            generated_text = generated_text[len(formatted_prompt):].strip()

        return generated_text if generated_text else fallback_generate_content(prompt, platform)

    except Exception as e:
        print(f"Error in generate_content: {e}")
        return fallback_generate_content(prompt, platform)
148
+
149
# Improved hashtag generation.
# The text-generation pipeline is built once and reused: constructing it
# inside the handler reloaded the model on every request.
_HASHTAG_GENERATOR = None

def generate_hashtags(topic, platform):
    """Generate relevant hashtags using AI, with a static fallback.

    Args:
        topic: content topic to derive hashtags from.
        platform: target platform name (used in the prompt and fallback).

    Returns:
        A space-separated hashtag string.
    """
    global _HASHTAG_GENERATOR
    try:
        # Use read token if available. Only `token` is passed: combining it
        # with the deprecated `use_auth_token` raises a ValueError in recent
        # transformers releases, which made this AI path always fall back.
        token = HF_TOKEN_READ if HF_TOKEN_READ else None

        if _HASHTAG_GENERATOR is None:
            _HASHTAG_GENERATOR = pipeline(
                'text-generation',
                model='gpt2',
                token=token,
            )
        generator = _HASHTAG_GENERATOR

        prompt = f"Generate 5 relevant hashtags for {topic} on {platform}:"
        result = generator(prompt, max_length=50, num_return_sequences=1)

        hashtags = result[0]['generated_text'].replace(prompt, '').strip()
        # Clean up and format hashtags: keep at most 5 whitespace-separated
        # tokens and prefix each with '#'.
        hashtag_list = [tag.strip().replace(' ', '') for tag in hashtags.split()[:5]]
        hashtags = ' '.join(['#' + tag for tag in hashtag_list if tag])

        return hashtags if hashtags else fallback_hashtags(topic, platform)

    except Exception as e:
        print(f"Error generating hashtags: {e}")
        return fallback_hashtags(topic, platform)
177
+
178
def fallback_hashtags(topic, platform):
    """Build a static hashtag string from the topic plus per-platform tags."""
    platform_tag_sets = {
        "Instagram": "#instagood #photooftheday #instadaily #motivation #inspiration",
        "TikTok": "#fyp #foryou #viral #trending #tiktok",
    }
    default_tags = "#content #socialmedia #digital"

    # Squash spaces first, then turn comma separators into new '#' markers,
    # e.g. "fitness, health" -> "fitness #health".
    topic_tags = topic.lower().replace(' ', '').replace(',', ' #')

    return f"#{topic_tags} {platform_tag_sets.get(platform, default_tags)}"
190
+
191
# Content improvement function.
# The text2text pipeline is built once and reused: constructing it per call
# reloaded FLAN-T5 on every click of the "Improve" button.
_IMPROVER_PIPELINE = None

def improve_content(content, platform, tone):
    """Improve existing content using AI.

    Args:
        content: the text to rewrite.
        platform: platform name, included in the instruction prompt.
        tone: desired tone, included in the instruction prompt.

    Returns:
        The improved text, or the original `content` unchanged on any error.
    """
    global _IMPROVER_PIPELINE
    try:
        # Use read token if available. Only `token` is passed: supplying the
        # deprecated `use_auth_token` alongside it raises a ValueError in
        # recent transformers releases, which made this function always
        # return the content unchanged.
        token = HF_TOKEN_READ if HF_TOKEN_READ else None

        if _IMPROVER_PIPELINE is None:
            _IMPROVER_PIPELINE = pipeline(
                'text2text-generation',
                model='google/flan-t5-base',
                token=token,
            )
        improver = _IMPROVER_PIPELINE

        prompt = f"Improve this {platform} content to make it more {tone}: {content}"
        result = improver(prompt, max_length=200)

        return result[0]['generated_text']
    except Exception as e:
        print(f"Error improving content: {e}")
        return content
212
 
213
  # Main content generation function
214
+ def generate_social_media_content(platform, content_type, topic, target_audience, tone, length, brand_voice, key_message, call_to_action, model_choice):
215
  """Generate social media content based on inputs"""
216
 
217
  # Generate main content
218
  content_prompt = f"Create a {content_type.lower()} for {platform} about {topic} targeting {target_audience} in a {tone.lower()} tone"
219
+ generated_content = generate_content(content_prompt, platform)
220
 
221
  # Generate hashtags
222
  hashtags = generate_hashtags(topic, platform)
223
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
224
  # Calculate stats
225
  content_text = generated_content
226
  char_count = len(content_text)
 
258
  - Target: {target_audience}
259
  - Tone: {tone}
260
  - CTA: {call_to_action}
261
+ - AI Model: {model_choice}
262
+ - Created: {datetime.now().strftime('%Y-%m-%d %H:%M')}
263
  """
264
 
265
  return output
266
 
 
 
 
 
 
 
 
 
 
 
 
 
 
267
def get_trending_topics():
    """Return one suggestion picked at random from a fixed trending list."""
    suggestions = (
        "Productivity hacks", "Morning routine", "Self care Sunday",
        "Workspace setup", "Healthy recipes", "Weekend vibes",
        "Goal setting", "Mindfulness tips",
    )
    return random.choice(suggestions)
275
 
 
314
  padding: 20px;
315
  border-radius: 10px;
316
  background: #f8f9fa;
317
+ border-left: 4px solid #667eea;
318
+ }
319
+ .header {
320
+ text-align: center;
321
+ padding: 20px;
322
+ background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
323
+ color: white;
324
+ border-radius: 10px;
325
+ margin-bottom: 20px;
326
+ }
327
+ .token-status {
328
+ padding: 10px;
329
+ border-radius: 5px;
330
+ margin-bottom: 10px;
331
+ }
332
+ .token-ok {
333
+ background: #d4edda;
334
+ color: #155724;
335
+ border: 1px solid #c3e6cb;
336
+ }
337
+ .token-warning {
338
+ background: #fff3cd;
339
+ color: #856404;
340
+ border: 1px solid #ffeeba;
341
  }
342
  """
343
  ) as demo:
344
+ with gr.Column():
345
+ gr.Markdown("""
346
+ <div class="header">
347
+ <h1>📱 Social Media Content Creator</h1>
348
+ <p>Create engaging content for Instagram and TikTok with AI assistance</p>
349
+ </div>
350
+ """)
351
+
352
+ # Token status indicator
353
+ token_status = "🔑 Read Token: " + ("✅ Available" if HF_TOKEN_READ else "❌ Not configured")
354
+ token_status += " | Write Token: " + ("✅ Available" if HF_TOKEN_WRITE else "❌ Not configured")
355
+
356
+ token_status_class = "token-ok" if HF_TOKEN_READ else "token-warning"
357
+ gr.Markdown(f"""<div class="token-status {token_status_class}">{token_status}</div>""")
358
 
359
  with gr.Tab("Create Content"):
360
  with gr.Row():
361
  with gr.Column(scale=2):
362
+ with gr.Row():
363
+ platform = gr.Dropdown(
364
+ choices=["Instagram", "TikTok", "Both"],
365
+ label="Platform",
366
+ value="Instagram"
367
+ )
368
+
369
+ model_choice = gr.Dropdown(
370
+ choices=["Auto-Select", "DialoGPT (Instagram)", "GPT-2 (TikTok)", "FLAN-T5"],
371
+ label="AI Model",
372
+ value="Auto-Select",
373
+ interactive=True
374
+ )
375
 
376
  content_type = gr.Dropdown(
377
  choices=[
 
484
  # Output section
485
  output = gr.Markdown(elem_classes="output-markdown")
486
 
487
+ # Improvement UI
488
+ with gr.Row():
489
+ improve_btn = gr.Button("Improve with AI", variant="secondary")
490
+ enhance_tone = gr.Dropdown(
491
+ choices=["Casual", "Professional", "Fun", "Inspirational", "Educational", "Trendy"],
492
+ label="Enhance Tone",
493
+ value="Casual"
494
+ )
495
+
496
+ # Save to Hub button (only show if write token is available)
497
+ if HF_TOKEN_WRITE:
498
+ save_btn = gr.Button("💾 Save to Hub", variant="secondary")
499
+
500
  # Event handlers
501
  def update_tips(selected_platform):
502
  if selected_platform == "Instagram":
 
535
  generate_social_media_content,
536
  inputs=[
537
  platform, content_type, topic, target_audience,
538
+ tone, post_length, brand_voice, key_message, call_to_action, model_choice
539
  ],
540
  outputs=output
541
  )
542
+
543
+ def improve_existing_content(content, platform, tone):
544
+ if content and "## Content:" in content:
545
+ # Extract the actual content part
546
+ content_part = content.split("## Content:")[1].split("## Hashtags:")[0].strip()
547
+ improved = improve_content(content_part, platform, tone)
548
+
549
+ # Reconstruct the output
550
+ parts = content.split("## Content:")
551
+ parts[1] = f"\n{improved}\n\n"
552
+ return "## Content:".join(parts)
553
+ return content
554
+
555
+ improve_btn.click(
556
+ improve_existing_content,
557
+ inputs=[output, platform, enhance_tone],
558
+ outputs=output
559
+ )
560
+
561
+ # Save to Hub handler
562
+ if HF_TOKEN_WRITE:
563
+ def save_content_to_hub(content):
564
+ if not content:
565
+ return "No content to save"
566
+ return save_to_hub(content)
567
+
568
+ save_btn.click(
569
+ save_content_to_hub,
570
+ inputs=output,
571
+ outputs=gr.Markdown()
572
+ )
573
 
574
  with gr.Tab("Templates"):
575
  gr.Markdown("### Content Templates")