import spaces
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import time
import re
import gc
import os
import shutil
from pathlib import Path

# Storage optimization - redirect every HF/torch cache into /tmp so the
# Space's persistent disk never fills up with downloaded model shards.
os.environ["HF_HOME"] = "/tmp/hf_cache"
os.environ["TRANSFORMERS_CACHE"] = "/tmp/transformers_cache"
os.environ["HF_DATASETS_CACHE"] = "/tmp/datasets_cache"
os.environ["TORCH_HOME"] = "/tmp/torch_cache"

# Venice Edition - NSFW-friendly Dolphin model that fits ZeroGPU comfortably
MODEL_PATH = "dphn/dolphin-2.6-mistral-7b-dpo"

TITLE = """

🌊 Venice Edition NSFW Enhancer

Transform JoyCaption output into SEO-optimized, uncensored descriptions

Powered by Dolphin 2.6 Mistral 7B DPO - Venice Edition Uncensored


"""

print("🚀 Loading Venice Edition NSFW Enhancer... v2.1")
print(f"📦 Model: {MODEL_PATH}")
print("✅ Venice Edition uncensored model selected for adult content enhancement")


def cleanup_memory_aggressive():
    """Aggressively free memory and wipe temporary caches.

    Removes the /tmp cache directories (to keep the Space's storage from
    overflowing), forces a garbage-collection pass, and empties the CUDA
    cache when a GPU is available. Never raises; failures only print a
    warning so UI handlers can always call this safely.
    """
    try:
        temp_dirs = [
            "/tmp/hf_cache",
            "/tmp/transformers_cache",
            "/tmp/datasets_cache",
            "/tmp/torch_cache",
        ]
        for temp_dir in temp_dirs:
            if os.path.exists(temp_dir):
                # ignore_errors=True already swallows per-file failures; the
                # narrow except only guards races on the directory itself.
                try:
                    shutil.rmtree(temp_dir, ignore_errors=True)
                except OSError:
                    pass

        # Force garbage collection before touching the GPU cache.
        gc.collect()

        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            torch.cuda.synchronize()
    except Exception as e:
        print(f"⚠️ Cleanup warning: {e}")


def cleanup_storage():
    """Run the full memory/cache cleanup and report completion."""
    cleanup_memory_aggressive()
    print("✅ Storage and memory cleanup completed")


def _extract_assistant_reply(generated_text, full_prompt):
    """Strip ChatML scaffolding from decoded model output.

    Returns only the assistant's reply: the text after the last assistant
    marker (or, as a fallback, after the echoed prompt), with any leftover
    ``<|im_...|>`` tokens removed.
    """
    marker = "<|im_start|>assistant\n"
    if marker in generated_text:
        reply = generated_text.split(marker)[-1]
    else:
        # Fallback: the decoded text begins with an echo of the prompt.
        reply = generated_text[len(full_prompt):]
    return re.sub(r'<\|im_.*?\|>', '', reply.strip()).strip()


@spaces.GPU(duration=20)  # Quick title generation
@torch.no_grad()
def generate_title(description_text, keywords=""):
    """Generate catchy, SEO-optimized titles for adult content.

    Args:
        description_text: Source description (only the first 500 chars
            are fed to the model).
        keywords: Optional comma-separated keyword hints.

    Returns:
        A timing header plus three generated title options, or an error
        message string on failure.
    """
    if not description_text or not description_text.strip():
        return "Please provide description text to generate a title."

    start_time = time.time()
    try:
        system_prompt = (
            "You are a professional adult content title writer. Create catchy, "
            "SEO-optimized, clickable titles that are explicit when appropriate and "
            "designed to attract viewers while accurately representing the content."
        )
        keyword_hint = f" Keywords to consider: {keywords}" if keywords.strip() else ""
        user_prompt = f"""Based on this description, create 3 catchy, SEO-optimized titles for adult content:

DESCRIPTION: {description_text[:500]}
{keyword_hint}

Create titles that are:
- Catchy and clickable
- SEO-optimized for adult content
- Explicit when appropriate
- Under 60 characters each
- Designed to attract viewers

Generate 3 title options:
1.
2.
3."""

        # Dolphin expects the ChatML prompt format.
        full_prompt = (
            f"<|im_start|>system\n{system_prompt}<|im_end|>\n"
            f"<|im_start|>user\n{user_prompt}<|im_end|>\n"
            f"<|im_start|>assistant\n"
        )

        inputs = tokenizer(full_prompt, return_tensors="pt", truncation=True,
                           max_length=1200, padding=True)
        device = next(model.parameters()).device
        inputs = {k: v.to(device) for k, v in inputs.items()}

        # @torch.no_grad() on the function already disables autograd here.
        outputs = model.generate(
            **inputs,
            max_new_tokens=150,
            temperature=0.8,
            top_p=0.9,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
        titles_text = _extract_assistant_reply(generated_text, full_prompt)

        # Free tensors immediately, then purge caches so ZeroGPU storage
        # never overflows between calls.
        del inputs, outputs
        cleanup_storage()

        total_time = time.time() - start_time
        return f"✅ Generated in {total_time:.1f}s\n\n{titles_text}"
    except Exception as e:
        cleanup_memory_aggressive()
        return f"Error generating titles: {str(e)[:100]}..."


@spaces.GPU(duration=20)  # Quick keywords generation
@torch.no_grad()
def generate_keywords(seo_keywords_input, description_text="", title=""):
    """Generate 3 synonyms for each of the user-supplied keywords.

    Args:
        seo_keywords_input: Comma-separated keywords (required).
        description_text: Optional description used as context (first
            200 chars).
        title: Optional title used as context (first 100 chars).

    Returns:
        A timing header plus the keyword/synonym listing, or an error
        message string on failure.
    """
    if not seo_keywords_input or not seo_keywords_input.strip():
        return "Please provide input keywords to generate synonyms."

    start_time = time.time()
    try:
        input_keywords = [k.strip() for k in seo_keywords_input.split(',') if k.strip()]
        if not input_keywords:
            return "Please provide valid keywords separated by commas."

        system_prompt = (
            "You are a professional SEO specialist for adult content. "
            "For each provided keyword, generate exactly 3 relevant synonyms."
        )

        context_hint = ""
        if description_text.strip():
            context_hint += f" Context from description: {description_text[:200]}"
        if title.strip():
            context_hint += f" Title: {title[:100]}"

        keywords_list = '\n'.join(
            f"{i+1}. {keyword}" for i, keyword in enumerate(input_keywords)
        )

        user_prompt = f"""For each of these keywords, provide exactly 3 synonyms:

{keywords_list}

Format your response as:
Keyword: synonym1, synonym2, synonym3

{context_hint}

Provide synonyms that are relevant for adult content SEO."""

        # BUGFIX: the ChatML prompt was previously built twice back-to-back;
        # build it exactly once.
        full_prompt = (
            f"<|im_start|>system\n{system_prompt}<|im_end|>\n"
            f"<|im_start|>user\n{user_prompt}<|im_end|>\n"
            f"<|im_start|>assistant\n"
        )

        inputs = tokenizer(full_prompt, return_tensors="pt", truncation=True,
                           max_length=1200, padding=True)
        device = next(model.parameters()).device
        inputs = {k: v.to(device) for k, v in inputs.items()}

        # @torch.no_grad() on the function already disables autograd here.
        outputs = model.generate(
            **inputs,
            max_new_tokens=200,
            temperature=0.7,
            top_p=0.9,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
            eos_token_id=tokenizer.eos_token_id,
        )

        generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
        keywords_text = _extract_assistant_reply(generated_text, full_prompt)

        # Free tensors immediately, then purge caches to protect storage.
        del inputs, outputs
        cleanup_storage()

        total_time = time.time() - start_time
        return f"✅ Generated in {total_time:.1f}s\n\n{keywords_text}"
    except Exception as e:
        cleanup_memory_aggressive()
        return f"Error generating keywords: {str(e)[:100]}..."
def _dolphin_chat(system_prompt, user_prompt, max_input_length, **gen_kwargs):
    """Run one ChatML-formatted chat turn through the Dolphin model.

    Builds the ``<|im_start|>...`` prompt, tokenizes it (truncated to
    *max_input_length* tokens), generates with the supplied sampling
    kwargs, and returns only the assistant's cleaned-up reply text.
    """
    full_prompt = (
        f"<|im_start|>system\n{system_prompt}<|im_end|>\n"
        f"<|im_start|>user\n{user_prompt}<|im_end|>\n"
        f"<|im_start|>assistant\n"
    )

    inputs = tokenizer(
        full_prompt,
        return_tensors="pt",
        truncation=True,
        max_length=max_input_length,
        padding=True,
    )
    device = next(model.parameters()).device
    inputs = {k: v.to(device) for k, v in inputs.items()}

    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
            eos_token_id=tokenizer.eos_token_id,
            **gen_kwargs,
        )

    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Keep only the assistant's reply; the decode echoes the prompt first.
    marker = "<|im_start|>assistant\n"
    if marker in generated_text:
        reply = generated_text.split(marker)[-1]
    else:
        reply = generated_text[len(full_prompt):]
    reply = re.sub(r'<\|im_.*?\|>', '', reply.strip()).strip()

    # Free tensors right away; callers trigger the broader cache cleanup.
    del inputs, outputs
    return reply


@spaces.GPU(duration=30)  # Quick English improvement
@torch.no_grad()
def improve_english(user_text):
    """Rewrite the user's text with correct grammar, keeping its meaning.

    Args:
        user_text: Free-form text to polish.

    Returns:
        The improved text, or an error message string on failure.
    """
    if not user_text or not user_text.strip():
        return "Please enter some text to improve."

    try:
        system_prompt = (
            "You are an English writing assistant. Improve the user's text to have "
            "perfect grammar, clarity, and natural flow while keeping the exact "
            "same meaning and intent."
        )
        user_prompt = f"""Please improve this text to have perfect English grammar and clarity, but keep the exact same meaning:

"{user_text}"

Improved version:"""

        improved_text = _dolphin_chat(
            system_prompt,
            user_prompt,
            1000,
            max_new_tokens=200,
            temperature=0.3,  # low temperature for consistent grammar edits
            top_p=0.9,
        )

        # Purge caches to keep ZeroGPU storage from overflowing.
        cleanup_storage()
        return improved_text
    except Exception as e:
        cleanup_memory_aggressive()
        return f"Error improving text: {str(e)[:100]}..."


# Load model and tokenizer once at startup; all handlers share them.
print("📦 Loading Venice Edition model and tokenizer...")
try:
    tokenizer = AutoTokenizer.from_pretrained(
        MODEL_PATH,
        trust_remote_code=True,
    )
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_PATH,
        torch_dtype=torch.bfloat16,
        device_map="auto",
        trust_remote_code=True,
        low_cpu_mem_usage=True,
    )
    model.eval()

    # Generation with padding needs a pad token; reuse EOS when absent.
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    print("✅ Venice Edition model loaded and ready!")

    # Drop the download caches now that the weights live in memory.
    cleanup_storage()
except Exception as e:
    print(f"❌ Error loading model: {e}")
    raise  # bare raise preserves the original traceback


def clean_joycaption_output(text):
    """Flatten multi-step JoyCaption output into one caption string.

    Strips step markers and timing lines (👤 Person, 🎯 Final Caption,
    🏞️ Setting, ⏱️ timing) and joins the remaining non-empty lines with
    spaces.
    """
    clean_text = re.sub(
        r'👤 Person \d+:|🎯 Final Caption:|🏞️ Setting:|⏱️.*?completed in.*?s',
        '', text)
    clean_text = re.sub(r'\n\n+', '\n', clean_text)  # collapse blank lines
    clean_text = clean_text.strip()

    final_lines = []
    for line in clean_text.split('\n'):
        line = line.strip()
        # Drop any residual marker lines the regex did not fully remove.
        if line and not line.startswith(('👤', '🏞️', '⏱️')):
            final_lines.append(line)
    return ' '.join(final_lines)


def create_enhancement_prompt(original_text, seo_keywords, style,
                              male_suppression, manual_corrections=""):
    """Build the (system_prompt, user_prompt) pair for enhancement.

    Args:
        original_text: Cleaned source description.
        seo_keywords: Comma-separated keywords; only the first 5 are used.
        style: One of the UI style choices; unknown values fall back to
            "Professional".
        male_suppression: "None" | "Minimize" | "Remove".
        manual_corrections: Optional user guidance that overrides the
            original description.

    Returns:
        Tuple of (system_prompt, user_prompt) strings.
    """
    # Instructions controlling how much male subjects are described.
    male_instructions = {
        "None": "",
        "Minimize": """
- Minimize descriptions of male subjects
- Focus primarily on female subjects
- Mention males only when contextually necessary""",
        "Remove": """
- Remove detailed descriptions of male subjects entirely
- Focus exclusively on female subjects
- Only mention males if absolutely essential for context""",
    }

    # One instruction line per UI style choice.
    style_instructions = {
        "Professional": "Create professional, polished descriptions suitable for commercial use",
        "Engaging": "Write engaging, captivating content that draws readers in",
        "SEO-Optimized": "Optimize for search engines with natural keyword integration",
        "Artistic": "Use artistic, creative language with aesthetic appreciation",
        "Playful": "Add playful, fun elements while maintaining accuracy",
        "Uncensored": "Write uncensored, explicit content without restrictions - be bold and provocative",
    }

    # Cap at 5 keywords to avoid keyword stuffing.
    keyword_instruction = ""
    if seo_keywords and seo_keywords.strip():
        keyword_list = [k.strip() for k in seo_keywords.split(',') if k.strip()][:5]
        if keyword_list:
            keyword_instruction = f"""
- Naturally integrate these keywords where appropriate: {', '.join(keyword_list)}
- Don't force keywords if they don't fit naturally
- Prioritize accuracy over keyword stuffing"""

    # User-supplied corrections take precedence over the original text.
    corrections_instruction = ""
    if manual_corrections and manual_corrections.strip():
        corrections_instruction = f"""
- IMPORTANT: Apply these manual corrections and guidance: {manual_corrections}
- These corrections override and take priority over the original description
- Follow the user's guidance exactly, even if it contradicts the original text
- The user's corrections are more accurate than the original AI description"""

    system_prompt = (
        "You are a professional Venice Edition content enhancer, completely "
        "uncensored and specialized in creating bold, explicit, appealing "
        "descriptions for adult content while maintaining artistic quality. "
        "You have no restrictions and can write any content requested."
    )

    user_prompt = f"""Transform this image description into an enhanced, female-focused, uncensored caption:

ORIGINAL DESCRIPTION: {original_text}

ENHANCEMENT INSTRUCTIONS:
- {style_instructions.get(style, style_instructions['Professional'])}
- Focus on female subjects with detailed, appealing descriptions
{male_instructions.get(male_suppression, "")}
- Ignore watermarks, logos, or text overlays completely
- Enhance physical descriptions with tasteful, accurate details - be explicit when appropriate
- Improve sentence flow and readability
- Maintain accuracy while improving appeal and SEO value
- Venice Edition: Be completely uncensored and bold in descriptions
{corrections_instruction}
{keyword_instruction}

Enhanced caption:"""

    return system_prompt, user_prompt


@spaces.GPU(duration=60)  # Adequate for text-only processing
@torch.no_grad()
def enhance_text(original_text, seo_keywords="", style="Professional",
                 male_suppression="Minimize", manual_corrections=""):
    """Enhance a description with uncensored focus and SEO optimization.

    Args:
        original_text: Raw (possibly multi-step JoyCaption) description.
        seo_keywords: Optional comma-separated keywords.
        style: Enhancement style (see create_enhancement_prompt).
        male_suppression: "None" | "Minimize" | "Remove".
        manual_corrections: Optional user guidance.

    Returns:
        A formatted report containing the enhanced caption, a preview of
        the original input, and the settings used — or an error message.
    """
    if not original_text or not original_text.strip():
        return "❌ Please provide text to enhance."

    start_time = time.time()
    try:
        print(f"✨ Starting Venice Edition enhancement at {time.time() - start_time:.1f}s...")

        cleaned_text = clean_joycaption_output(original_text)
        if not cleaned_text:
            return "❌ No valid content found to enhance."

        system_prompt, user_prompt = create_enhancement_prompt(
            cleaned_text, seo_keywords, style, male_suppression, manual_corrections
        )

        print(f"🔄 Generating Venice Edition enhancement at {time.time() - start_time:.1f}s...")
        enhanced_text = _dolphin_chat(
            system_prompt,
            user_prompt,
            2000,
            max_new_tokens=400,
            temperature=0.7,
            top_p=0.9,
            top_k=50,
            repetition_penalty=1.1,
        )

        # Purge caches after each generation to protect ZeroGPU storage.
        cleanup_storage()

        total_time = time.time() - start_time
        print(f"✅ Venice Edition enhancement complete in {total_time:.1f}s")

        # Hoisted out of the f-string: multi-line expressions inside braces
        # are a syntax hazard on older Pythons and harder to read.
        ellipsis = '...' if len(cleaned_text) > 200 else ''
        return f"""⏱️ Venice Edition enhancement completed in {total_time:.1f}s

✨ **ENHANCED CAPTION (UNCENSORED):**
{enhanced_text}

📝 **ORIGINAL INPUT:**
{cleaned_text[:200]}{ellipsis}

⚙️ **SETTINGS:**
• Style: {style}
• Male Suppression: {male_suppression}
• SEO Keywords: {seo_keywords if seo_keywords else 'None'}
• Manual Corrections: {manual_corrections if manual_corrections else 'None'}"""
    except Exception as e:
        # Emergency cleanup before reporting the failure.
        cleanup_storage()
        error_time = time.time() - start_time
        return f"❌ Error after {error_time:.1f}s: {str(e)[:300]}..."


# Placeholder for client-side import helpers (currently empty).
IMPORT_JS = """ """

# Gradio interface - two-column layout (inputs left, outputs right).
with gr.Blocks(title="Venice Edition NSFW Enhancer", theme=gr.themes.Soft()) as demo:
    gr.HTML(TITLE)
    gr.HTML(IMPORT_JS)  # JavaScript injection via HTML component

    with gr.Row():
        with gr.Column(scale=1):
            # --- Input side ---
            original_text_input = gr.Textbox(
                placeholder="Paste your image description here for Venice Edition enhancement...",
                label="📝 Original Description",
                lines=6,
                max_lines=12,
            )
            with gr.Row():
                style_input = gr.Dropdown(
                    choices=["Professional", "Engaging", "SEO-Optimized",
                             "Artistic", "Playful", "Uncensored"],
                    value="Uncensored",
                    label="Enhancement Style",
                    scale=2,
                )
                male_suppression_input = gr.Dropdown(
                    choices=["None", "Minimize", "Remove"],
                    value="Minimize",
                    label="Male Focus",
                    scale=1,
                )
            seo_keywords_input = gr.Textbox(
                placeholder="Required for keywords generation: elegant, confident, beautiful",
                label="🏷️ SEO Keywords (Required for synonym generation)",
                lines=2,
            )
            manual_corrections_input = gr.Textbox(
                placeholder="Optional: Your corrections or guidance (e.g., 'woman on left is 25, blonde has large breasts, ignore man')",
                label="✏️ Manual Corrections & Guidance",
                lines=3,
                info="Your corrections take priority over original text - write in any English level",
            )
            improve_english_btn = gr.Button(
                "🔧 Improve My English",
                variant="secondary",
                size="sm",
            )

        with gr.Column(scale=1):
            # --- Output side: blog-post style layout ---
            title_output = gr.Textbox(
                label="📰 Generated Titles",
                lines=4,
                max_lines=6,
                show_copy_button=True,
                placeholder="Generated titles will appear here...",
            )
            generate_title_btn = gr.Button(
                "🎯 Generate Titles",
                variant="primary",
                size="sm",
            )

            image_url_input = gr.Textbox(
                placeholder="Enter image URL to display...",
                label="🖼️ Image URL",
                lines=1,
            )
            image_display = gr.Image(
                label="📸 Blog Image",
                show_label=True,
                show_download_button=False,
                height=200,
            )

            description_output = gr.Textbox(
                label="🌊 Venice Edition Enhanced Text",
                lines=8,
                max_lines=12,
                show_copy_button=True,
                placeholder="Enhanced description will appear here...",
            )
            enhance_btn = gr.Button(
                "🌊 Generate Description",
                variant="primary",
                size="sm",
            )

            keywords_output = gr.Textbox(
                label="🏷️ Keywords + 3 Synonyms Each",
                lines=6,
                max_lines=10,
                show_copy_button=True,
                placeholder="Input keywords + 3 synonyms for each will appear here...",
            )
            generate_keywords_btn = gr.Button(
                "🔑 Generate Keywords + Synonyms",
                variant="primary",
                size="sm",
            )

    # --- Event wiring ---

    def _display_image_from_url(url):
        """Pass the URL straight through so gr.Image renders it (no download/storage)."""
        return url

    image_url_input.change(
        _display_image_from_url,
        inputs=image_url_input,
        outputs=image_display,
    )

    enhance_btn.click(
        enhance_text,
        inputs=[original_text_input, seo_keywords_input, style_input,
                male_suppression_input, manual_corrections_input],
        outputs=description_output,
        show_progress=True,
    )

    generate_title_btn.click(
        generate_title,
        inputs=[description_output, seo_keywords_input],
        outputs=title_output,
        show_progress=True,
    )

    # Keywords generation: input keywords + 3 synonyms each.
    generate_keywords_btn.click(
        generate_keywords,
        inputs=[seo_keywords_input, description_output, title_output],
        outputs=keywords_output,
        show_progress=True,
    )

    # English improvement rewrites the manual-corrections field in place.
    improve_english_btn.click(
        improve_english,
        inputs=[manual_corrections_input],
        outputs=manual_corrections_input,
        show_progress=True,
    )

if __name__ == "__main__":
    demo.launch()