# nickdigger's picture
# Upload app.py with huggingface_hub
# 3c666da verified
import spaces
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import time
import re
import gc
import os
import shutil
from pathlib import Path
# Storage optimization - redirect cache to temporary directories
# NOTE(review): these env vars are assigned AFTER `transformers`/`torch` are
# imported above; several of them (e.g. TRANSFORMERS_CACHE, HF_HOME) are read
# at import time in some library versions, so the redirection may not take
# effect — consider setting them before the imports. TODO confirm.
os.environ["HF_HOME"] = "/tmp/hf_cache"
os.environ["TRANSFORMERS_CACHE"] = "/tmp/transformers_cache"
os.environ["HF_DATASETS_CACHE"] = "/tmp/datasets_cache"
os.environ["TORCH_HOME"] = "/tmp/torch_cache"
# Venice Edition - NSFW-friendly Dolphin model that fits ZeroGPU comfortably
# Hugging Face model id; loaded once at startup further down in this file.
MODEL_PATH = "dphn/dolphin-2.6-mistral-7b-dpo"
# HTML banner rendered at the top of the Gradio UI.
TITLE = """
<div style="text-align: center; margin: 20px 0;">
<h1>🌊 Venice Edition NSFW Enhancer</h1>
<p><strong>Transform JoyCaption output into SEO-optimized, uncensored descriptions</strong></p>
<p><em>Powered by Dolphin 2.6 Mistral 7B DPO - Venice Edition Uncensored</em></p>
</div>
<hr>
"""
# Startup banner (runs at import time).
print("πŸš€ Loading Venice Edition NSFW Enhancer... v2.1")
print(f"πŸ“¦ Model: {MODEL_PATH}")
print("βœ… Venice Edition uncensored model selected for adult content enhancement")
def cleanup_memory_aggressive():
    """Free Python/GPU memory and wipe the temporary HF/torch cache dirs.

    Best-effort: any failure is printed and swallowed so this can safely be
    called from exception handlers without raising again.
    """
    try:
        # Remove the /tmp cache dirs so repeated runs don't fill the disk.
        # ignore_errors=True already makes each rmtree non-fatal (missing dirs
        # included), so the original per-directory try/except and the
        # os.path.exists() pre-check were redundant.
        temp_dirs = [
            "/tmp/hf_cache",
            "/tmp/transformers_cache",
            "/tmp/datasets_cache",
            "/tmp/torch_cache",
        ]
        for temp_dir in temp_dirs:
            shutil.rmtree(temp_dir, ignore_errors=True)
        # Reclaim Python-level garbage before touching the GPU allocator.
        gc.collect()
        # Release cached GPU memory when a CUDA device is present.
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            torch.cuda.synchronize()
    except Exception as e:
        # Never let cleanup itself crash the caller; just report it.
        print(f"⚠️ Cleanup warning: {e}")
def cleanup_storage():
    """Run the full memory/cache cleanup pass and report completion on stdout."""
    cleanup_memory_aggressive()
    print("βœ… Storage and memory cleanup completed")
@spaces.GPU(duration=20)  # Quick title generation
@torch.no_grad()
def generate_title(description_text, keywords=""):
    """Generate 3 catchy, SEO-optimized titles for adult content.

    Args:
        description_text: Source description (typically the enhanced caption).
        keywords: Optional comma-separated keyword hints for the model.

    Returns:
        A timing line plus the model's title suggestions, or an error string.
    """
    if not description_text or not description_text.strip():
        return "Please provide description text to generate a title."
    start_time = time.time()
    try:
        system_prompt = "You are a professional adult content title writer. Create catchy, SEO-optimized, clickable titles that are explicit when appropriate and designed to attract viewers while accurately representing the content."
        keyword_hint = f" Keywords to consider: {keywords}" if keywords.strip() else ""
        # Description is truncated to 500 chars to keep the prompt short.
        user_prompt = f"""Based on this description, create 3 catchy, SEO-optimized titles for adult content:
DESCRIPTION:
{description_text[:500]}
{keyword_hint}
Create titles that are:
- Catchy and clickable
- SEO-optimized for adult content
- Explicit when appropriate
- Under 60 characters each
- Designed to attract viewers
Generate 3 title options:
1.
2.
3."""
        # Dolphin uses the ChatML prompt format.
        full_prompt = f"<|im_start|>system\n{system_prompt}<|im_end|>\n<|im_start|>user\n{user_prompt}<|im_end|>\n<|im_start|>assistant\n"
        inputs = tokenizer(full_prompt, return_tensors="pt", truncation=True, max_length=1200, padding=True)
        device = next(model.parameters()).device
        inputs = {k: v.to(device) for k, v in inputs.items()}
        # Gradients are already disabled by the @torch.no_grad() decorator,
        # so the original's nested `with torch.no_grad():` was redundant.
        outputs = model.generate(
            **inputs,
            max_new_tokens=150,
            temperature=0.8,
            top_p=0.9,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
            eos_token_id=tokenizer.eos_token_id
        )
        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
        # Keep only the assistant's reply.
        if "<|im_start|>assistant\n" in generated_text:
            titles_text = generated_text.split("<|im_start|>assistant\n")[-1]
        else:
            titles_text = generated_text[len(full_prompt):]
        titles_text = re.sub(r'<\|im_.*?\|>', '', titles_text.strip()).strip()
        # Free tensors, then a single cleanup pass. cleanup_storage() already
        # runs cleanup_memory_aggressive() internally, so the original's
        # back-to-back calls performed the same cleanup twice.
        del inputs, outputs
        cleanup_storage()
        total_time = time.time() - start_time
        return f"βœ… Generated in {total_time:.1f}s\n\n{titles_text}"
    except Exception as e:
        cleanup_memory_aggressive()
        return f"Error generating titles: {str(e)[:100]}..."
@spaces.GPU(duration=20)  # Quick keywords generation
@torch.no_grad()
def generate_keywords(seo_keywords_input, description_text="", title=""):
    """Return the input keywords with exactly 3 model-generated synonyms each.

    Args:
        seo_keywords_input: Comma-separated keywords (required).
        description_text: Optional description used as context (truncated to 200 chars).
        title: Optional title used as context (truncated to 100 chars).

    Returns:
        A timing line plus the model's keyword/synonym list, or an error string.
    """
    if not seo_keywords_input or not seo_keywords_input.strip():
        return "Please provide input keywords to generate synonyms."
    start_time = time.time()
    try:
        # Parse and validate the comma-separated keyword list.
        input_keywords = [k.strip() for k in seo_keywords_input.split(',') if k.strip()]
        if not input_keywords:
            return "Please provide valid keywords separated by commas."
        system_prompt = "You are a professional SEO specialist for adult content. For each provided keyword, generate exactly 3 relevant synonyms."
        # Optional context, truncated to keep the prompt small.
        context_hint = ""
        if description_text.strip():
            context_hint += f" Context from description: {description_text[:200]}"
        if title.strip():
            context_hint += f" Title: {title[:100]}"
        keywords_list = '\n'.join([f"{i+1}. {keyword}" for i, keyword in enumerate(input_keywords)])
        user_prompt = f"""For each of these keywords, provide exactly 3 synonyms:
{keywords_list}
Format your response as:
Keyword: synonym1, synonym2, synonym3
{context_hint}
Provide synonyms that are relevant for adult content SEO."""
        # Dolphin uses the ChatML prompt format.
        # (Bug fix: the original built this exact prompt twice in a row.)
        full_prompt = f"<|im_start|>system\n{system_prompt}<|im_end|>\n<|im_start|>user\n{user_prompt}<|im_end|>\n<|im_start|>assistant\n"
        inputs = tokenizer(full_prompt, return_tensors="pt", truncation=True, max_length=1200, padding=True)
        device = next(model.parameters()).device
        inputs = {k: v.to(device) for k, v in inputs.items()}
        # Gradients are already disabled by the @torch.no_grad() decorator.
        outputs = model.generate(
            **inputs,
            max_new_tokens=200,
            temperature=0.7,
            top_p=0.9,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
            eos_token_id=tokenizer.eos_token_id
        )
        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
        # Keep only the assistant's reply.
        if "<|im_start|>assistant\n" in generated_text:
            keywords_text = generated_text.split("<|im_start|>assistant\n")[-1]
        else:
            keywords_text = generated_text[len(full_prompt):]
        keywords_text = re.sub(r'<\|im_.*?\|>', '', keywords_text.strip()).strip()
        # Free tensors, then a single cleanup pass (cleanup_storage already
        # runs cleanup_memory_aggressive internally).
        del inputs, outputs
        cleanup_storage()
        total_time = time.time() - start_time
        return f"βœ… Generated in {total_time:.1f}s\n\n{keywords_text}"
    except Exception as e:
        cleanup_memory_aggressive()
        return f"Error generating keywords: {str(e)[:100]}..."
@spaces.GPU(duration=30)  # Quick English improvement
@torch.no_grad()
def improve_english(user_text):
    """Rewrite the user's text with corrected grammar and clearer phrasing
    while preserving its exact meaning and intent.

    Args:
        user_text: Free-form text, any English level.

    Returns:
        The improved text, or an error/usage message string.
    """
    if not user_text or not user_text.strip():
        return "Please enter some text to improve."
    started = time.time()
    try:
        system_prompt = "You are an English writing assistant. Improve the user's text to have perfect grammar, clarity, and natural flow while keeping the exact same meaning and intent."
        user_prompt = f"""Please improve this text to have perfect English grammar and clarity, but keep the exact same meaning:
"{user_text}"
Improved version:"""
        # ChatML framing for the Dolphin model.
        chatml = f"<|im_start|>system\n{system_prompt}<|im_end|>\n<|im_start|>user\n{user_prompt}<|im_end|>\n<|im_start|>assistant\n"
        # Tokenize and move everything onto the model's device.
        encoded = tokenizer(chatml, return_tensors="pt", truncation=True, max_length=1000, padding=True)
        target = next(model.parameters()).device
        encoded = {name: tensor.to(target) for name, tensor in encoded.items()}
        with torch.no_grad():
            generated = model.generate(
                **encoded,
                max_new_tokens=200,
                temperature=0.3,  # low temperature keeps corrections conservative
                top_p=0.9,
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id,
                eos_token_id=tokenizer.eos_token_id
            )
        decoded = tokenizer.decode(generated[0], skip_special_tokens=True)
        # Keep only the assistant's reply, then strip stray ChatML tokens.
        marker = "<|im_start|>assistant\n"
        result = decoded.split(marker)[-1] if marker in decoded else decoded[len(chatml):]
        result = re.sub(r'<\|im_.*?\|>', '', result.strip()).strip()
        # Release tensors and clean up caches before returning.
        del encoded, generated
        cleanup_memory_aggressive()
        cleanup_storage()
        return result
    except Exception as e:
        cleanup_memory_aggressive()
        return f"Error improving text: {str(e)[:100]}..."
# Load model and tokenizer once at startup (module import time).
print("πŸ“¦ Loading Venice Edition model and tokenizer...")
try:
    tokenizer = AutoTokenizer.from_pretrained(
        MODEL_PATH,
        trust_remote_code=True
    )
    # Ensure a pad token exists BEFORE any tokenization with padding=True
    # (the original set it after the model load; order-independent, but this
    # makes the invariant explicit).
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_PATH,
        torch_dtype=torch.bfloat16,
        device_map="auto",
        trust_remote_code=True,
        low_cpu_mem_usage=True
    )
    model.eval()  # inference only; pairs with the @torch.no_grad() decorators
    print("βœ… Venice Edition model loaded and ready!")
    # Initial cleanup after model loading to optimize storage
    cleanup_storage()
except Exception as e:
    print(f"❌ Error loading model: {e}")
    # Bare raise preserves the original traceback (avoids `raise e`).
    raise
def clean_joycaption_output(text):
    """Strip JoyCaption step markers and timing info, flattening the
    remaining content into a single space-joined line.

    Args:
        text: Raw multi-step JoyCaption output.

    Returns:
        The cleaned caption text as one line.
    """
    # Drop step/timing markers, collapse blank-line runs, trim the ends.
    stripped = re.sub(r'πŸ‘€ Person \d+:|🎯 Final Caption:|🏞️ Setting:|⏱️.*?completed in.*?s', '', text)
    stripped = re.sub(r'\n\n+', '\n', stripped).strip()
    # Keep only non-empty lines that are not residual marker lines.
    kept = [
        piece.strip()
        for piece in stripped.split('\n')
        if piece.strip() and not piece.strip().startswith(('πŸ‘€', '🏞️', '⏱️'))
    ]
    return ' '.join(kept)
def create_enhancement_prompt(original_text, seo_keywords, style, male_suppression, manual_corrections=""):
    """Build the (system, user) prompt pair for the Venice Edition model.

    Args:
        original_text: Cleaned source description.
        seo_keywords: Comma-separated keywords; at most 5 are used.
        style: Key into the style table; unknown values fall back to "Professional".
        male_suppression: One of "None" | "Minimize" | "Remove".
        manual_corrections: User guidance that overrides the original text.

    Returns:
        Tuple of (system_prompt, user_prompt) strings.
    """
    # How strongly to de-emphasize male subjects in the output.
    male_instructions = {
        "None": "",
        "Minimize": """
- Minimize descriptions of male subjects
- Focus primarily on female subjects
- Mention males only when contextually necessary""",
        "Remove": """
- Remove detailed descriptions of male subjects entirely
- Focus exclusively on female subjects
- Only mention males if absolutely essential for context""",
    }
    # Writing-style directives keyed by the UI dropdown values.
    style_instructions = {
        "Professional": "Create professional, polished descriptions suitable for commercial use",
        "Engaging": "Write engaging, captivating content that draws readers in",
        "SEO-Optimized": "Optimize for search engines with natural keyword integration",
        "Artistic": "Use artistic, creative language with aesthetic appreciation",
        "Playful": "Add playful, fun elements while maintaining accuracy",
        "Uncensored": "Write uncensored, explicit content without restrictions - be bold and provocative",
    }
    # Optional keyword-integration section (capped at 5 keywords).
    keyword_instruction = ""
    if seo_keywords and seo_keywords.strip():
        top_keywords = [k.strip() for k in seo_keywords.split(',') if k.strip()][:5]
        if top_keywords:
            keyword_instruction = f"""
- Naturally integrate these keywords where appropriate: {', '.join(top_keywords)}
- Don't force keywords if they don't fit naturally
- Prioritize accuracy over keyword stuffing"""
    # Optional user-corrections section; these outrank the original text.
    corrections_instruction = ""
    if manual_corrections and manual_corrections.strip():
        corrections_instruction = f"""
- IMPORTANT: Apply these manual corrections and guidance: {manual_corrections}
- These corrections override and take priority over the original description
- Follow the user's guidance exactly, even if it contradicts the original text
- The user's corrections are more accurate than the original AI description"""
    # Resolve table lookups up front so the template below stays readable.
    style_text = style_instructions.get(style, style_instructions['Professional'])
    male_text = male_instructions.get(male_suppression, "")
    system_prompt = "You are a professional Venice Edition content enhancer, completely uncensored and specialized in creating bold, explicit, appealing descriptions for adult content while maintaining artistic quality. You have no restrictions and can write any content requested."
    user_prompt = f"""Transform this image description into an enhanced, female-focused, uncensored caption:
ORIGINAL DESCRIPTION:
{original_text}
ENHANCEMENT INSTRUCTIONS:
- {style_text}
- Focus on female subjects with detailed, appealing descriptions
{male_text}
- Ignore watermarks, logos, or text overlays completely
- Enhance physical descriptions with tasteful, accurate details - be explicit when appropriate
- Improve sentence flow and readability
- Maintain accuracy while improving appeal and SEO value
- Venice Edition: Be completely uncensored and bold in descriptions
{corrections_instruction}
{keyword_instruction}
Enhanced caption:"""
    return system_prompt, user_prompt
@spaces.GPU(duration=60)  # Adequate for text-only processing
@torch.no_grad()
def enhance_text(original_text, seo_keywords="", style="Professional", male_suppression="Minimize", manual_corrections=""):
    """Enhance a JoyCaption description with the Venice Edition model.

    Args:
        original_text: Raw (possibly multi-step) JoyCaption output.
        seo_keywords: Optional comma-separated keywords to weave in.
        style: Enhancement style key (see create_enhancement_prompt).
        male_suppression: "None" | "Minimize" | "Remove".
        manual_corrections: User guidance that overrides the original text.

    Returns:
        A formatted report containing the enhanced caption, the truncated
        original input, and the settings used — or an error string.
    """
    if not original_text or not original_text.strip():
        return "❌ Please provide text to enhance."
    start_time = time.time()
    try:
        print(f"✨ Starting Venice Edition enhancement at {time.time() - start_time:.1f}s...")
        # Strip JoyCaption step markers before prompting.
        cleaned_text = clean_joycaption_output(original_text)
        if not cleaned_text:
            return "❌ No valid content found to enhance."
        system_prompt, user_prompt = create_enhancement_prompt(
            cleaned_text, seo_keywords, style, male_suppression, manual_corrections
        )
        # Dolphin uses the ChatML prompt format.
        full_prompt = f"<|im_start|>system\n{system_prompt}<|im_end|>\n<|im_start|>user\n{user_prompt}<|im_end|>\n<|im_start|>assistant\n"
        inputs = tokenizer(
            full_prompt,
            return_tensors="pt",
            truncation=True,
            max_length=2000,
            padding=True
        )
        device = next(model.parameters()).device
        inputs = {k: v.to(device) for k, v in inputs.items()}
        print(f"πŸ”„ Generating Venice Edition enhancement at {time.time() - start_time:.1f}s...")
        # Gradients are already disabled by the @torch.no_grad() decorator,
        # so the original's nested `with torch.no_grad():` was redundant.
        outputs = model.generate(
            **inputs,
            max_new_tokens=400,
            temperature=0.7,
            top_p=0.9,
            top_k=50,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
            eos_token_id=tokenizer.eos_token_id,
            repetition_penalty=1.1
        )
        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
        # Keep only the assistant's reply.
        if "<|im_start|>assistant\n" in generated_text:
            enhanced_text = generated_text.split("<|im_start|>assistant\n")[-1]
        else:
            enhanced_text = generated_text[len(full_prompt):]
        # Remove any ChatML tokens that survived decoding.
        enhanced_text = re.sub(r'<\|im_.*?\|>', '', enhanced_text.strip()).strip()
        # Free tensors, then a single cleanup pass. cleanup_storage() already
        # runs cleanup_memory_aggressive() internally, so the original's
        # back-to-back calls performed the same cleanup twice.
        del inputs, outputs
        cleanup_storage()
        total_time = time.time() - start_time
        print(f"βœ… Venice Edition enhancement complete in {total_time:.1f}s")
        return f"""⏱️ Venice Edition enhancement completed in {total_time:.1f}s
✨ **ENHANCED CAPTION (UNCENSORED):**
{enhanced_text}
πŸ“ **ORIGINAL INPUT:**
{cleaned_text[:200]}{'...' if len(cleaned_text) > 200 else ''}
βš™οΈ **SETTINGS:**
β€’ Style: {style}
β€’ Male Suppression: {male_suppression}
β€’ SEO Keywords: {seo_keywords if seo_keywords else 'None'}
β€’ Manual Corrections: {manual_corrections if manual_corrections else 'None'}"""
    except Exception as e:
        # Emergency cleanup, then report how long we ran before failing.
        cleanup_memory_aggressive()
        cleanup_storage()
        error_time = time.time() - start_time
        return f"❌ Error after {error_time:.1f}s: {str(e)[:300]}..."
# JavaScript for import functionality
# NOTE(review): this script is injected via gr.HTML below. Gradio commonly
# sanitizes or does not execute <script> tags inserted through HTML
# components — verify the import buttons actually appear in the deployed
# Space; if not, Gradio's Blocks `js=`/`head=` parameters are the supported
# injection points. TODO confirm.
IMPORT_JS = """
<script>
// Venice Edition Import Functionality
function fillVeniceEditionFields(data) {
console.log('🎯 Filling Venice Edition fields with data:', data);
if (!data || !data.data) {
console.error('❌ Invalid data format');
return false;
}
const joyCaptionData = data.data;
let fieldsFound = 0;
let fieldsFilled = 0;
// Find all textareas and inputs in Venice Edition
const textFields = document.querySelectorAll('textarea, input[type="text"]');
fieldsFound = textFields.length;
console.log(`πŸ“ Found ${fieldsFound} text fields in Venice Edition`);
// Function to fill a field by matching placeholder or context
function fillFieldByContext(searchTerms, value, fieldType = 'general') {
for (const field of textFields) {
const placeholder = (field.placeholder || '').toLowerCase();
const label = getFieldLabel(field).toLowerCase();
const context = (placeholder + ' ' + label).toLowerCase();
// Check if any search term matches
const matches = searchTerms.some(term => context.includes(term.toLowerCase()));
if (matches && !field.value.trim()) { // Only fill if empty
field.value = value;
field.dispatchEvent(new Event('input', { bubbles: true }));
field.dispatchEvent(new Event('change', { bubbles: true }));
fieldsFilled++;
console.log(`βœ… Filled ${fieldType}: ${value.substring(0, 50)}...`);
return true;
}
}
return false;
}
// Fill fields based on JoyCaption data
if (joyCaptionData.caption_engaging) {
fillFieldByContext(['original', 'description', 'paste your image'], joyCaptionData.caption_engaging, 'Engaging Caption');
}
if (joyCaptionData.caption_casual_friend && !fieldsFilled) {
fillFieldByContext(['original', 'description', 'paste your image'], joyCaptionData.caption_casual_friend, 'Casual Caption');
}
if (joyCaptionData.caption_keywords && !fieldsFilled) {
fillFieldByContext(['original', 'description', 'paste your image'], joyCaptionData.caption_keywords, 'Keywords Caption');
}
if (joyCaptionData.keywords) {
fillFieldByContext(['keyword', 'seo', 'required'], joyCaptionData.keywords, 'Keywords');
}
if (joyCaptionData.custom_instructions) {
fillFieldByContext(['correction', 'manual', 'guidance'], joyCaptionData.custom_instructions, 'Custom Instructions');
}
// If no specific matches, fill the first few large text areas with captions
if (fieldsFilled === 0 && (joyCaptionData.caption_engaging || joyCaptionData.caption_casual_friend || joyCaptionData.caption_keywords)) {
const largeTextareas = Array.from(textFields).filter(field =>
field.tagName === 'TEXTAREA' && !field.value.trim()
);
let index = 0;
const captions = [joyCaptionData.caption_engaging, joyCaptionData.caption_casual_friend, joyCaptionData.caption_keywords].filter(Boolean);
if (captions.length > 0 && largeTextareas[index]) {
largeTextareas[index].value = captions[0];
largeTextareas[index].dispatchEvent(new Event('input', { bubbles: true }));
fieldsFilled++;
console.log(`βœ… Filled textarea ${index} with caption`);
}
}
console.log(`πŸ“Š Filled ${fieldsFilled} out of ${fieldsFound} fields`);
return fieldsFilled > 0;
}
function getFieldLabel(field) {
// Try to find associated label
if (field.id) {
const label = document.querySelector(`label[for="${field.id}"]`);
if (label) return label.textContent || '';
}
// Look in parent elements for text context
let parent = field.parentElement;
let levels = 0;
while (parent && levels < 3) {
const labels = parent.querySelectorAll('label, span, div');
for (const label of labels) {
const text = label.textContent || '';
if (text.length > 0 && text.length < 100) {
return text;
}
}
parent = parent.parentElement;
levels++;
}
return field.placeholder || '';
}
function loadFromFile(event) {
const file = event.target.files[0];
if (!file) return;
const reader = new FileReader();
reader.onload = function(e) {
try {
const data = JSON.parse(e.target.result);
const success = fillVeniceEditionFields(data);
if (success) {
alert(`βœ… Successfully imported and filled Venice Edition fields!`);
} else {
alert('⚠️ Data imported but no matching fields found. Check field labels.');
}
} catch (error) {
console.error('❌ Import error:', error);
alert('❌ Failed to import file: ' + error.message);
}
};
reader.readAsText(file);
}
function loadFromClipboard() {
navigator.clipboard.readText().then(text => {
try {
const data = JSON.parse(text);
const success = fillVeniceEditionFields(data);
if (success) {
alert(`βœ… Successfully imported from clipboard and filled Venice Edition fields!`);
} else {
alert('⚠️ Data imported but no matching fields found. Check field labels.');
}
} catch (error) {
console.error('❌ Clipboard import error:', error);
alert('❌ Failed to import from clipboard: Invalid JSON format');
}
}).catch(err => {
console.error('❌ Clipboard read error:', err);
alert('❌ Failed to read clipboard: ' + err.message);
});
}
function createImportButtons() {
// Remove existing buttons first
const existingContainer = document.getElementById('venice-import-container');
if (existingContainer) existingContainer.remove();
// Create import button container
const container = document.createElement('div');
container.id = 'venice-import-container';
container.style.cssText = `
position: fixed;
top: 20px;
right: 20px;
z-index: 9999;
display: flex;
flex-direction: column;
gap: 10px;
`;
// Create file input (hidden)
const fileInput = document.createElement('input');
fileInput.type = 'file';
fileInput.accept = '.json';
fileInput.style.display = 'none';
fileInput.addEventListener('change', loadFromFile);
// Create import from file button
const importFileBtn = document.createElement('button');
importFileBtn.innerHTML = 'πŸ“ Import from File';
importFileBtn.style.cssText = `
background: linear-gradient(135deg, #2e7d32, #388e3c);
color: white;
border: none;
padding: 12px 20px;
border-radius: 25px;
font-weight: 600;
cursor: pointer;
box-shadow: 0 4px 12px rgba(46, 125, 50, 0.3);
transition: all 0.3s ease;
`;
importFileBtn.addEventListener('mouseover', () => {
importFileBtn.style.transform = 'translateY(-2px)';
importFileBtn.style.boxShadow = '0 6px 16px rgba(46, 125, 50, 0.4)';
});
importFileBtn.addEventListener('mouseout', () => {
importFileBtn.style.transform = 'translateY(0)';
importFileBtn.style.boxShadow = '0 4px 12px rgba(46, 125, 50, 0.3)';
});
importFileBtn.addEventListener('click', () => fileInput.click());
// Create import from clipboard button
const importClipBtn = document.createElement('button');
importClipBtn.innerHTML = 'πŸ“‹ Import from Clipboard';
importClipBtn.style.cssText = `
background: linear-gradient(135deg, #1976d2, #1565c0);
color: white;
border: none;
padding: 12px 20px;
border-radius: 25px;
font-weight: 600;
cursor: pointer;
box-shadow: 0 4px 12px rgba(25, 118, 210, 0.3);
transition: all 0.3s ease;
`;
importClipBtn.addEventListener('mouseover', () => {
importClipBtn.style.transform = 'translateY(-2px)';
importClipBtn.style.boxShadow = '0 6px 16px rgba(25, 118, 210, 0.4)';
});
importClipBtn.addEventListener('mouseout', () => {
importClipBtn.style.transform = 'translateY(0)';
importClipBtn.style.boxShadow = '0 4px 12px rgba(25, 118, 210, 0.3)';
});
importClipBtn.addEventListener('click', loadFromClipboard);
// Add elements to container
container.appendChild(fileInput);
container.appendChild(importFileBtn);
container.appendChild(importClipBtn);
document.body.appendChild(container);
console.log('βœ… Import buttons created and attached to body');
}
// Multiple attempts to create buttons after Gradio loads
setTimeout(createImportButtons, 1000);
setTimeout(createImportButtons, 3000);
setTimeout(createImportButtons, 5000);
// Also try when DOM changes (Gradio dynamic loading)
const observer = new MutationObserver(() => {
if (!document.getElementById('venice-import-container')) {
createImportButtons();
}
});
observer.observe(document.body, { childList: true, subtree: true });
</script>
"""
# Gradio Interface - TWO COLUMNS ONLY
# Left column: inputs and options. Right column: blog-post-style outputs
# (titles, image, enhanced description, keywords).
with gr.Blocks(title="Venice Edition NSFW Enhancer", theme=gr.themes.Soft()) as demo:
    gr.HTML(TITLE)
    # JavaScript injection via HTML component
    gr.HTML(IMPORT_JS)
    with gr.Row():
        with gr.Column(scale=1):
            # Input section
            original_text_input = gr.Textbox(
                placeholder="Paste your image description here for Venice Edition enhancement...",
                label="πŸ“ Original Description",
                lines=6,
                max_lines=12
            )
            with gr.Row():
                style_input = gr.Dropdown(
                    choices=["Professional", "Engaging", "SEO-Optimized", "Artistic", "Playful", "Uncensored"],
                    value="Uncensored",
                    label="Enhancement Style",
                    scale=2
                )
                male_suppression_input = gr.Dropdown(
                    choices=["None", "Minimize", "Remove"],
                    value="Minimize",
                    label="Male Focus",
                    scale=1
                )
            seo_keywords_input = gr.Textbox(
                placeholder="Required for keywords generation: elegant, confident, beautiful",
                label="🏷️ SEO Keywords (Required for synonym generation)",
                lines=2
            )
            manual_corrections_input = gr.Textbox(
                placeholder="Optional: Your corrections or guidance (e.g., 'woman on left is 25, blonde has large breasts, ignore man')",
                label="✏️ Manual Corrections & Guidance",
                lines=3,
                info="Your corrections take priority over original text - write in any English level"
            )
            improve_english_btn = gr.Button(
                "πŸ”§ Improve My English",
                variant="secondary",
                size="sm"
            )
        with gr.Column(scale=1):
            # Blog Post Creator Layout - CLEAN VERSION
            # Title at the top
            title_output = gr.Textbox(
                label="πŸ“° Generated Titles",
                lines=4,
                max_lines=6,
                show_copy_button=True,
                placeholder="Generated titles will appear here..."
            )
            generate_title_btn = gr.Button(
                "🎯 Generate Titles",
                variant="primary",
                size="sm"
            )
            # Image display field
            image_url_input = gr.Textbox(
                placeholder="Enter image URL to display...",
                label="πŸ–ΌοΈ Image URL",
                lines=1
            )
            image_display = gr.Image(
                label="πŸ“Έ Blog Image",
                show_label=True,
                show_download_button=False,
                height=200
            )
            # Description in the middle - SAME AS 'Venice Edition Enhanced Text'
            description_output = gr.Textbox(
                label="🌊 Venice Edition Enhanced Text",
                lines=8,
                max_lines=12,
                show_copy_button=True,
                placeholder="Enhanced description will appear here..."
            )
            enhance_btn = gr.Button(
                "🌊 Generate Description",
                variant="primary",
                size="sm"
            )
            # Keywords at the bottom
            keywords_output = gr.Textbox(
                label="🏷️ Keywords + 3 Synonyms Each",
                lines=6,
                max_lines=10,
                show_copy_button=True,
                placeholder="Input keywords + 3 synonyms for each will appear here..."
            )
            generate_keywords_btn = gr.Button(
                "πŸ”‘ Generate Keywords + Synonyms",
                variant="primary",
                size="sm"
            )
    # Event wiring
    # Display image from URL (no download/storage)
    def _display_image_from_url(url):
        # gr.Image renders the URL string directly; nothing is fetched server-side.
        return url
    image_url_input.change(_display_image_from_url, inputs=image_url_input, outputs=image_display)
    # Generate Description
    enhance_btn.click(
        enhance_text,
        inputs=[original_text_input, seo_keywords_input, style_input, male_suppression_input, manual_corrections_input],
        outputs=description_output,
        show_progress=True
    )
    # Generate Titles (reads the enhanced description as source text)
    generate_title_btn.click(
        generate_title,
        inputs=[description_output, seo_keywords_input],
        outputs=title_output,
        show_progress=True
    )
    # Generate Keywords - FIXED: input keywords + 3 synonyms each
    generate_keywords_btn.click(
        generate_keywords,
        inputs=[seo_keywords_input, description_output, title_output],
        outputs=keywords_output,
        show_progress=True
    )
    # English improvement (edits the manual corrections field in place)
    improve_english_btn.click(
        improve_english,
        inputs=[manual_corrections_input],
        outputs=manual_corrections_input,
        show_progress=True
    )
# Script entry point: launch the Gradio app (Spaces also imports this module).
if __name__ == "__main__":
    demo.launch()