import os

# Storage optimization - redirect caches to temporary directories.
# Set these before importing transformers/torch so the libraries pick them up.
os.environ["HF_HOME"] = "/tmp/hf_cache"
os.environ["TRANSFORMERS_CACHE"] = "/tmp/transformers_cache"
os.environ["HF_DATASETS_CACHE"] = "/tmp/datasets_cache"
os.environ["TORCH_HOME"] = "/tmp/torch_cache"

import spaces  # must be imported before torch on ZeroGPU Spaces
import gradio as gr
import torch
from transformers import LlavaForConditionalGeneration, AutoProcessor
from PIL import Image
import gc
import re  # used by apply_smart_corrections
import time
import shutil
import json
from pathlib import Path
# Model configuration
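# JoyCaption "beta one" checkpoint in HF LLaVA format (loads with LlavaForConditionalGeneration)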
MODEL_PATH = "fancyfeast/llama-joycaption-beta-one-hf-llava"
def cleanup_storage():
"""Clean up temporary files and caches to prevent storage overflow"""
try:
# Clean up temporary caches
temp_dirs = ["/tmp/hf_cache", "/tmp/transformers_cache", "/tmp/datasets_cache", "/tmp/torch_cache"]
for temp_dir in temp_dirs:
if os.path.exists(temp_dir):
shutil.rmtree(temp_dir, ignore_errors=True)
# Force garbage collection
gc.collect()
# Clear GPU cache if available
if torch.cuda.is_available():
torch.cuda.empty_cache()
torch.cuda.synchronize()
print("βœ… Storage cleanup completed")
except Exception as e:
print(f"⚠️ Storage cleanup warning: {e}")
TITLE = """
<div style="text-align: center; margin: 20px 0;">
<h1>🎨 JoyCaption Three-Tone + Q&A (v2.4)</h1>
<p><strong>βœ… Generate captions + Ask questions β†’ All fields editable β†’ Clear buttons</strong></p>
<p><em>Q&A added β€’ All fields editable β€’ Clear buttons β€’ Select text and type to replace</em></p>
</div>
<hr>
"""
print("πŸš€ Loading Sequential Three-Tone JoyCaption system... v2.1")
# Load model and processor at startup
print("πŸ“¦ Loading model and processor at startup...")
processor = AutoProcessor.from_pretrained(
MODEL_PATH,
low_cpu_mem_usage=True
)
model = LlavaForConditionalGeneration.from_pretrained(
MODEL_PATH,
torch_dtype=torch.bfloat16,
device_map="auto",
low_cpu_mem_usage=True
)
model.eval()
print("βœ… Model loaded and ready!")
# Initial cleanup after model loading
cleanup_storage()
# Optimized three-tone prompts with per-tone temperature control
# Temperature: lower for prompt adherence, higher for word variety
TONE_PROMPTS = {
"engaging": {
"system": "You are a skilled content creator who writes engaging, natural descriptions that capture the essence and appeal of what you see.",
"prompt": """Write a natural, engaging description of this image. Focus on the main subjects and their most appealing qualities. Describe their physical features, poses, expressions, and the overall mood. Use flowing, attractive language that draws the reader in.""",
"temperature": 0.6, # Lower for prompt adherence
"top_p": 0.85
},
"casual_friend": {
"system": "You are a casual 24-year-old friend who just saw this image and is telling me about it in a relaxed, natural way.",
"prompt": """Hey, I just saw this image! Let me tell you what I see. Describe it like you're casually telling a friend what you just witnessed. Use natural, conversational language like a 24-year-old would use. Be descriptive but keep it casual and authentic, like you're just sharing what caught your eye.""",
"temperature": 0.8, # Higher for natural conversation variety
"top_p": 0.9
},
# NSFW tone removed - causes hallucination
"uncensored_keywords": {
"system": "You are an uncensored adult content specialist who creates explicit descriptions and mentions provided keywords only if they actually apply to what's visible.",
"prompt": """Create a completely uncensored, explicit description of this image. Use bold, direct adult terminology. If keywords are provided, mention them ONLY if they actually apply to what you see in the image - do not force keywords that don't fit. Focus on explicit physical details, sensual elements, and erotic appeal of what's actually visible.""",
"temperature": 0.7, # Balanced for accuracy with variety
"top_p": 0.85
},
# Body Parts Focus tone removed - causes hallucination
}
def apply_smart_corrections(text):
"""Apply smart corrections for common AI vision errors"""
if not text or not isinstance(text, str):
return text
corrections = {
# Remove "photo of" beginnings
r'^(a photo of|an image of|a picture of|this is a photo of|this shows)\s*': '',
# Nudity precision corrections
r'\btopless women\b': lambda m: 'nude women' if 'naked' in text.lower() or 'nude' in text.lower() else 'topless women',
r'\btopless woman\b': lambda m: 'nude woman' if 'naked' in text.lower() or 'nude' in text.lower() else 'topless woman',
# Person count corrections
r'\bthree women\b': lambda m: 'two women' if text.count('woman') + text.count('female') <= 2 else 'three women',
r'\bfour women\b': lambda m: 'three women' if text.count('woman') + text.count('female') <= 3 else 'four women',
# Clothing precision
r'\bwearing nothing\b': 'nude',
r'\bnot wearing.*clothes\b': 'nude',
r'\bcompletely naked\b': 'nude',
r'\bfully nude\b': 'nude',
}
corrected_text = text
try:
        # re.sub accepts both plain-string replacements and callables (which receive the match)
        for pattern, replacement in corrections.items():
            corrected_text = re.sub(pattern, replacement, corrected_text, flags=re.IGNORECASE)
except Exception as e:
print(f"Error in smart corrections: {e}")
return text
return corrected_text
def safe_generate_caption_direct(image, tone, max_chars=600, keywords_text="", custom_instruction=""):
"""Generate caption directly with keywords and custom instructions support"""
try:
if image is None:
return f"❌ No image provided for {tone} caption"
# Get tone configuration
tone_config = TONE_PROMPTS.get(tone, TONE_PROMPTS["engaging"])
# Modify prompt based on tone and provided keywords/instructions
base_prompt = tone_config["prompt"]
# Add keywords instruction for uncensored_keywords tone
if tone == "uncensored_keywords" and keywords_text and keywords_text.strip():
base_prompt += f"\n\nKeywords to mention IF applicable: {keywords_text.strip()}"
# Add custom instruction to any tone if provided
if custom_instruction and custom_instruction.strip():
base_prompt += f"\n\nMake sure to mention: {custom_instruction.strip()}\nInclude this detail naturally in your description."
# Create conversation
convo = [
{"role": "system", "content": tone_config["system"]},
{"role": "user", "content": base_prompt}
]
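        # Render the chat template to a prompt string, then tokenize text and image together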
convo_string = processor.apply_chat_template(convo, tokenize=False, add_generation_prompt=True)
inputs = processor(text=[convo_string], images=[image], return_tensors="pt")
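        # Move tensors to the model's device and cast pixels to the model's bfloat16 dtype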
device = next(model.parameters()).device
inputs = {k: v.to(device, non_blocking=True) if hasattr(v, 'to') else v for k, v in inputs.items()}
if 'pixel_values' in inputs:
inputs['pixel_values'] = inputs['pixel_values'].to(torch.bfloat16)
# Get tone-specific generation parameters
temperature = tone_config.get("temperature", 0.7)
top_p = tone_config.get("top_p", 0.9)
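        # Sample with the tone-specific temperature/top_p; generation stops at EOS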
with torch.no_grad():
output = model.generate(
**inputs,
max_new_tokens=150, # Increased for complete sentences
do_sample=True,
temperature=temperature,
top_p=top_p,
top_k=None,
use_cache=True,
pad_token_id=processor.tokenizer.eos_token_id,
eos_token_id=processor.tokenizer.eos_token_id
)
# Safe decoding
if output is None or len(output) == 0:
return f"❌ No output generated for {tone}"
# Decode result
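        # (strip the prompt tokens so only the newly generated text is decoded)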
if 'input_ids' in inputs and len(inputs['input_ids'].shape) >= 2:
input_length = inputs['input_ids'].shape[1]
if len(output[0]) > input_length:
generate_ids = output[0][input_length:]
result = processor.tokenizer.decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
else:
result = processor.tokenizer.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
else:
result = processor.tokenizer.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
result = result.strip()
# Apply smart corrections
result = apply_smart_corrections(result)
        # Only truncate if extremely long, cutting at a sentence boundary
        if len(result) > max_chars:
            # Search backwards from the limit for the last '.', '!', or '?'
            truncate_point = max_chars
            for i in range(max_chars - 1, max(0, max_chars - 100), -1):
                if result[i] in '.!?':
                    truncate_point = i + 1
                    break
            result = result[:truncate_point].strip()
# Aggressive cleanup to prevent storage overflow
del inputs, output
if torch.cuda.is_available():
torch.cuda.empty_cache()
torch.cuda.synchronize()
gc.collect()
return result if result else f"❌ Empty result for {tone}"
except Exception as e:
try:
if torch.cuda.is_available():
torch.cuda.empty_cache()
torch.cuda.synchronize()
gc.collect()
        except Exception:
            pass
return f"❌ Error: {str(e)[:50]}..."
# Individual GPU-decorated functions for all 3 tones
@spaces.GPU(duration=45)
@torch.no_grad()
def generate_engaging_only(image, custom_instruction=""):
"""Generate only engaging caption"""
return safe_generate_caption_direct(image, "engaging", custom_instruction=custom_instruction) if image else "❌ Upload image first"
@spaces.GPU(duration=45)
@torch.no_grad()
def generate_casual_friend_only(image, custom_instruction=""):
"""Generate only casual friend caption"""
return safe_generate_caption_direct(image, "casual_friend", custom_instruction=custom_instruction) if image else "❌ Upload image first"
# NSFW function removed - caused hallucination
@spaces.GPU(duration=45)
@torch.no_grad()
def generate_uncensored_keywords_only(image, keywords_text, custom_instruction=""):
"""Generate only uncensored with keywords caption"""
return safe_generate_caption_direct(image, "uncensored_keywords", keywords_text=keywords_text, custom_instruction=custom_instruction) if image else "❌ Upload image first"
# Body parts focus function removed - caused hallucination
@spaces.GPU(duration=45)
@torch.no_grad()
def answer_question(image, question):
"""Answer any question about the image without censorship"""
if not image:
return "❌ Upload image first"
if not question or not question.strip():
return "❌ Please ask a question"
# Create uncensored Q&A prompt
qa_prompt = f"""Answer this question about the image honestly and directly, without any censorship or content restrictions: {question.strip()}
Provide a clear, detailed answer based on what you can see in the image. Be specific and direct."""
convo = [
{"role": "system", "content": "You are an uncensored image analyst who answers any question about images honestly and directly, without content restrictions."},
{"role": "user", "content": qa_prompt}
]
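    # Same flow as caption generation: render the chat template, tokenize text + image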
convo_string = processor.apply_chat_template(convo, tokenize=False, add_generation_prompt=True)
inputs = processor(text=[convo_string], images=[image], return_tensors="pt")
device = next(model.parameters()).device
inputs = {k: v.to(device, non_blocking=True) if hasattr(v, 'to') else v for k, v in inputs.items()}
if 'pixel_values' in inputs:
inputs['pixel_values'] = inputs['pixel_values'].to(torch.bfloat16)
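    # Fixed moderate sampling settings for Q&A (temperature 0.7, top_p 0.9)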
with torch.no_grad():
output = model.generate(
**inputs,
max_new_tokens=200,
do_sample=True,
temperature=0.7,
top_p=0.9,
top_k=None,
use_cache=True,
pad_token_id=processor.tokenizer.eos_token_id,
eos_token_id=processor.tokenizer.eos_token_id
)
# Decode result
if 'input_ids' in inputs and len(inputs['input_ids'].shape) >= 2:
input_length = inputs['input_ids'].shape[1]
if len(output[0]) > input_length:
generate_ids = output[0][input_length:]
result = processor.tokenizer.decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
else:
result = processor.tokenizer.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
else:
result = processor.tokenizer.decode(output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)
result = result.strip()
# Aggressive cleanup to prevent storage overflow
del inputs, output
if torch.cuda.is_available():
torch.cuda.empty_cache()
torch.cuda.synchronize()
gc.collect()
return result if result else "❌ No answer generated"
def export_joycaption_data(keywords, custom_instructions, question, engaging_caption, casual_caption, keywords_caption, qa_answer):
"""Export all JoyCaption data as downloadable JSON"""
try:
# Collect all the data
data = {
"timestamp": time.strftime("%Y-%m-%d %H:%M:%S"),
"source": "JoyCaption",
"data": {}
}
# Add input fields
if keywords and keywords.strip():
data["data"]["keywords"] = keywords.strip()
if custom_instructions and custom_instructions.strip():
data["data"]["custom_instructions"] = custom_instructions.strip()
if question and question.strip():
data["data"]["question"] = question.strip()
# Add generated captions
if engaging_caption and engaging_caption.strip():
data["data"]["caption_engaging"] = engaging_caption.strip()
if casual_caption and casual_caption.strip():
data["data"]["caption_casual_friend"] = casual_caption.strip()
if keywords_caption and keywords_caption.strip():
data["data"]["caption_keywords"] = keywords_caption.strip()
if qa_answer and qa_answer.strip():
data["data"]["qa_answer"] = qa_answer.strip()
# Check if we have any data to export
if not data["data"]:
return "❌ No data to export. Generate some captions first!", None
        # Serialize, then write to a file so gr.File can serve the download
        json_string = json.dumps(data, indent=2, ensure_ascii=False)
        # Create a timestamped filename under /tmp
        filepath = f"/tmp/joycaption_data_{time.strftime('%Y%m%d_%H%M%S')}.json"
        with open(filepath, "w", encoding="utf-8") as f:
            f.write(json_string)
        # Return success message and the file path
        fields_count = len(data["data"])
        return f"✅ Exported {fields_count} fields: {', '.join(data['data'].keys())}", filepath
except Exception as e:
return f"❌ Export failed: {str(e)}", None
# JavaScript for export functionality
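# Injected into the page via gr.Blocks(head=...) below, since gr.HTML() strips <script> tags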
EXPORT_JS = """
<script>
// JoyCaption Export System
(function() {
    console.log('🚀 Initializing JoyCaption Export System...');
// Extract data from page fields
window.getJoyCaptionData = function() {
        console.log('📊 Extracting JoyCaption data...');
const data = {};
// Get all textareas and inputs from the page
const allInputs = document.querySelectorAll('textarea, input[type="text"]');
allInputs.forEach((field, index) => {
const placeholder = (field.placeholder || '').toLowerCase();
const value = field.value ? field.value.trim() : '';
// Skip empty fields
if (!value) return;
// Map based on placeholder text and content length
if (placeholder.includes('engaging') || (value.length > 50 && placeholder.includes('generate engaging'))) {
data.caption_engaging = value;
} else if (placeholder.includes('casual') || placeholder.includes('friend') || (value.length > 50 && placeholder.includes('generate casual'))) {
data.caption_casual_friend = value;
} else if (placeholder.includes('keyword') && value.length > 50) {
data.caption_keywords = value;
} else if (placeholder.includes('keyword') && value.length <= 50) {
data.keywords = value;
} else if (placeholder.includes('custom') || placeholder.includes('make sure') || placeholder.includes('mention')) {
data.custom_instructions = value;
} else if (placeholder.includes('question')) {
data.question = value;
} else if (value.length > 50) {
// Long text likely a caption
if (!data.caption_engaging) data.caption_engaging = value;
else if (!data.caption_casual_friend) data.caption_casual_friend = value;
else if (!data.caption_keywords) data.caption_keywords = value;
}
});
// Add image URLs if present
const images = document.querySelectorAll('img');
const imageUrls = [];
images.forEach(img => {
if (img.src && !img.src.includes('data:') && !img.src.includes('blob:')) {
imageUrls.push(img.src);
}
});
if (imageUrls.length > 0) {
data.image_urls = imageUrls;
}
        console.log('📦 Extracted data:', data);
return data;
};
// Listen for extension requests
window.addEventListener('message', function(event) {
if (event.data && event.data.action === 'getJoyCaptionData') {
const data = window.getJoyCaptionData();
event.source.postMessage({
action: 'joyCaptionData',
data: data,
success: Object.keys(data).length > 0
}, event.origin);
}
});
// Export functionality
window.downloadJoyCaptionData = function() {
try {
const rawData = window.getJoyCaptionData();
if (Object.keys(rawData).length === 0) {
alert('❌ No data found to export. Make sure you have generated captions first.');
return;
}
// Package data for export
const exportData = {
timestamp: new Date().toISOString(),
source: 'JoyCaption',
data: rawData
};
// Create and download JSON file
const jsonString = JSON.stringify(exportData, null, 2);
const blob = new Blob([jsonString], { type: 'application/json' });
const url = URL.createObjectURL(blob);
const a = document.createElement('a');
a.href = url;
a.download = `joycaption_data_${new Date().toISOString().slice(0, 16).replace(/:/g, '-')}.json`;
document.body.appendChild(a);
a.click();
document.body.removeChild(a);
URL.revokeObjectURL(url);
            alert(`✅ Downloaded JoyCaption data with ${Object.keys(rawData).length} fields!`);
            console.log('📥 Downloaded data:', exportData);
} catch (error) {
console.error('❌ Export error:', error);
alert('❌ Export failed: ' + error.message);
}
};
// Create export button
function createExportButton() {
// Remove any existing button first
const existingBtn = document.getElementById('joyCaption-export-btn');
if (existingBtn) existingBtn.remove();
// Create a floating export button
const exportBtn = document.createElement('button');
exportBtn.id = 'joyCaption-export-btn';
        exportBtn.innerHTML = '📥 Export JoyCaption Data';
exportBtn.style.cssText = `
position: fixed;
top: 20px;
right: 20px;
z-index: 9999;
background: linear-gradient(135deg, #ff6b35, #f7931e);
color: white;
border: none;
padding: 12px 20px;
border-radius: 25px;
font-weight: 600;
cursor: pointer;
box-shadow: 0 4px 12px rgba(255, 107, 53, 0.3);
transition: all 0.3s ease;
`;
exportBtn.addEventListener('mouseover', () => {
exportBtn.style.transform = 'translateY(-2px)';
exportBtn.style.boxShadow = '0 6px 16px rgba(255, 107, 53, 0.4)';
});
exportBtn.addEventListener('mouseout', () => {
exportBtn.style.transform = 'translateY(0)';
exportBtn.style.boxShadow = '0 4px 12px rgba(255, 107, 53, 0.3)';
});
exportBtn.addEventListener('click', window.downloadJoyCaptionData);
document.body.appendChild(exportBtn);
        console.log('✅ Export button created and attached to body');
}
// Multiple attempts to create button after Gradio loads
setTimeout(createExportButton, 1000);
setTimeout(createExportButton, 3000);
setTimeout(createExportButton, 5000);
    // Also re-create the button when the DOM changes (Gradio renders dynamically).
    // Wait for <body> in case this script runs from <head> before the page is parsed.
    function startObserver() {
        const observer = new MutationObserver(() => {
            if (!document.getElementById('joyCaption-export-btn')) {
                createExportButton();
            }
        });
        observer.observe(document.body, { childList: true, subtree: true });
    }
    if (document.body) {
        startObserver();
    } else {
        window.addEventListener('DOMContentLoaded', startObserver);
    }
})();
</script>
"""
# Gradio Interface (EXPORT_JS goes in head= because gr.HTML strips <script> tags)
with gr.Blocks(title="Sequential Three-Tone JoyCaption", theme=gr.themes.Soft(), head=EXPORT_JS) as demo:
gr.HTML(TITLE)
with gr.Row():
# Left column - Image and controls
with gr.Column(scale=1):
image_input = gr.Image(
type="pil",
label="πŸ“Έ Upload Image",
height=400
)
keywords_input = gr.Textbox(
placeholder="e.g., sensual, curves, intimate, alluring...",
label="🏷️ Keywords",
lines=2,
info="Add keywords that will be mentioned by the 'Keywords' tone ONLY if they apply to what's visible in the image"
)
custom_instruction_input = gr.Textbox(
placeholder="e.g., 'from instagram', 'the left girl has red hair', 'two girls kissing', 'beach setting'...",
label="🎯 Make sure that you mention:",
lines=2,
info="Any specific detail you want mentioned - context, scene details, features, etc. (Works with all tones)"
)
question_input = gr.Textbox(
placeholder="e.g., 'What are they doing?', 'Describe her pose', 'What's the setting?'...",
label="❓ Ask a Question",
lines=2,
info="Ask any question about the image - uncensored answers"
)
with gr.Row():
with gr.Column(scale=4):
ask_question_btn = gr.Button(
"❓ Ask Question",
variant="secondary",
size="sm"
)
with gr.Column(scale=1, min_width=50):
                    clear_qa_btn = gr.Button("🗑️", size="sm", variant="secondary")
qa_output = gr.Textbox(
label="",
lines=5,
max_lines=8,
show_copy_button=True,
interactive=True,
placeholder="Ask a question above to get uncensored answers..."
)
# Right column - Three caption outputs
with gr.Column(scale=1):
# Engaging caption
with gr.Row():
with gr.Column(scale=4):
generate_engaging_btn = gr.Button(
"✨ Engaging",
variant="primary",
size="sm"
)
with gr.Column(scale=1, min_width=50):
                    reload_engaging = gr.Button("🔄", size="sm", variant="secondary")
with gr.Row():
with gr.Column(scale=1, min_width=50):
                    clear_engaging_btn = gr.Button("🗑️", size="sm", variant="secondary")
engaging_output = gr.Textbox(
label="",
lines=5,
max_lines=8,
show_copy_button=True,
interactive=True,
placeholder="Click the button above to generate engaging caption..."
)
# Casual Friend caption
with gr.Row():
with gr.Column(scale=4):
generate_friend_btn = gr.Button(
"😎 Casual Friend",
variant="primary",
size="sm"
)
with gr.Column(scale=1, min_width=50):
                    reload_friend = gr.Button("🔄", size="sm", variant="secondary")
with gr.Row():
with gr.Column(scale=1, min_width=50):
                    clear_friend_btn = gr.Button("🗑️", size="sm", variant="secondary")
friend_output = gr.Textbox(
label="",
lines=5,
max_lines=8,
show_copy_button=True,
interactive=True,
placeholder="Click the button above to generate casual friend caption..."
)
# NSFW section removed - caused hallucination
# Keywords caption
with gr.Row():
with gr.Column(scale=4):
generate_uncensored_btn = gr.Button(
"πŸ”΄ Keywords",
variant="secondary",
size="sm"
)
with gr.Column(scale=1, min_width=50):
                    reload_uncensored = gr.Button("🔄", size="sm", variant="secondary")
with gr.Row():
with gr.Column(scale=1, min_width=50):
                    clear_uncensored_btn = gr.Button("🗑️", size="sm", variant="secondary")
uncensored_output = gr.Textbox(
label="",
lines=5,
max_lines=8,
show_copy_button=True,
interactive=True,
placeholder="Click the button above to generate keywords caption..."
)
# Body Parts Focus section removed - caused hallucination
gr.Markdown("""
### 🎨 **Three-Tone Caption System**
**✨ Engaging**: Natural, flowing descriptions that capture appeal and mood
**😎 Casual Friend**: Relaxed, friendly descriptions as if talking to a buddy
**πŸ”΄ Uncensored + Keywords**: Bold, explicit descriptions enhanced with your keywords
### πŸ”„ **Individual Generation Benefits**
- **Complete control** - Generate only what you want, when you want
- **True individual processing** - Each button generates independently
- **Complete sentences** - Smart truncation at sentence boundaries
- **Keyword integration** - Use keywords for Uncensored tone when applicable
- **No "photo of" beginnings** - Direct, natural descriptions
- **No hallucination** - Only describes what's actually visible
### πŸ’‘ **Individual-Only Workflow**
1. Upload image
2. Add keywords (for Uncensored + Keywords tone)
3. Ask questions for additional context
4. Click ONLY the tone you want
5. Generate one at a time - no simultaneous processing
6. Copy the result and enhance it with the Text Enhancer space
7. Use πŸ”„ reload to regenerate that specific tone
8. **πŸ“₯ Export data** to use with Venice Edition Enhancer
""")
# Export functionality
with gr.Row():
export_btn = gr.Button(
"πŸ“₯ Export All Data (JSON)",
variant="primary",
size="lg"
)
export_output = gr.Textbox(
label="Export Status",
lines=2,
interactive=False,
visible=False
)
export_file = gr.File(
label="Download JSON",
visible=False
)
# Individual generate button handlers
generate_engaging_btn.click(
generate_engaging_only,
inputs=[image_input, custom_instruction_input],
outputs=engaging_output,
show_progress=True
)
generate_friend_btn.click(
generate_casual_friend_only,
inputs=[image_input, custom_instruction_input],
outputs=friend_output,
show_progress=True
)
# NSFW button handler removed
generate_uncensored_btn.click(
generate_uncensored_keywords_only,
inputs=[image_input, keywords_input, custom_instruction_input],
outputs=uncensored_output,
show_progress=True
)
# Body Parts Focus button handler removed
# Individual reload buttons - using direct generation for consistency
def reload_engaging_fn(image, custom_instruction):
return safe_generate_caption_direct(image, "engaging", custom_instruction=custom_instruction) if image else "❌ Upload image first"
def reload_friend_fn(image, custom_instruction):
return safe_generate_caption_direct(image, "casual_friend", custom_instruction=custom_instruction) if image else "❌ Upload image first"
# NSFW reload function removed
def reload_uncensored_fn(image, keywords, custom_instruction):
return safe_generate_caption_direct(image, "uncensored_keywords", keywords_text=keywords, custom_instruction=custom_instruction) if image else "❌ Upload image first"
# Body Parts Focus reload function removed
reload_engaging.click(
reload_engaging_fn,
inputs=[image_input, custom_instruction_input],
outputs=engaging_output,
show_progress=True
)
reload_friend.click(
reload_friend_fn,
inputs=[image_input, custom_instruction_input],
outputs=friend_output,
show_progress=True
)
# NSFW reload click handler removed
reload_uncensored.click(
reload_uncensored_fn,
inputs=[image_input, keywords_input, custom_instruction_input],
outputs=uncensored_output,
show_progress=True
)
# Body Parts Focus reload click handler removed
# Q&A functionality
ask_question_btn.click(
answer_question,
inputs=[image_input, question_input],
outputs=qa_output,
show_progress=True
)
# Clear button functions
def clear_text():
return ""
clear_qa_btn.click(
clear_text,
outputs=qa_output
)
clear_engaging_btn.click(
clear_text,
outputs=engaging_output
)
clear_friend_btn.click(
clear_text,
outputs=friend_output
)
# NSFW clear button handler removed
clear_uncensored_btn.click(
clear_text,
outputs=uncensored_output
)
# Export functionality
export_btn.click(
export_joycaption_data,
inputs=[
keywords_input,
custom_instruction_input,
question_input,
engaging_output,
friend_output,
uncensored_output,
qa_output
],
outputs=[export_output, export_file]
    ).then(
        # Reveal the status box and the download link once the export completes
        lambda: (gr.update(visible=True), gr.update(visible=True)),
        outputs=[export_output, export_file]
    )
# Body Parts Focus clear button handler removed
if __name__ == "__main__":
demo.launch()