En3rGy committed
Commit 71911cb · verified · 1 Parent(s): b04cb39

Delete deepsite_backend.py

Files changed (1): deepsite_backend.py +0 -122
deepsite_backend.py DELETED
@@ -1,122 +0,0 @@
- import gradio as gr
- import torch
- from PIL import Image
- from transformers import BlipProcessor, BlipForConditionalGeneration, CLIPProcessor, CLIPModel
- from torchvision import transforms  # imported but never used
-
- # Device selection: use the GPU when available, otherwise fall back to CPU
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
- # BLIP captioning model and processor
- blip_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
- blip_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to(device)
-
- # CLIP model and processor
- clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").to(device)
- clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
-
- # Placeholder for DeepDanbooru: returns a fixed tag string instead of running inference
- def danbooru_tagging(image):
-     return "1girl, bodysuit, sitting, wooden floor, solo"
-
- def generate_blip_caption(image):
-     raw_image = Image.open(image).convert("RGB")
-     inputs = blip_processor(raw_image, return_tensors="pt").to(device)
-     out = blip_model.generate(**inputs)
-     caption = blip_processor.decode(out[0], skip_special_tokens=True)
-     return caption
-
- def generate_clip_prompt(image, detail_level):
-     raw_image = Image.open(image).convert("RGB")
-     inputs = clip_processor(images=raw_image, return_tensors="pt").to(device)
-     # Image features are computed but never used; the prompt below is a fixed template
-     outputs = clip_model.get_image_features(**inputs)
-     base_prompt = "a woman in a bodysuit on wooden floor"
-     if detail_level >= 4:
-         return base_prompt + ", cinematic lighting, ultra detailed, HDR"
-     elif detail_level == 3:
-         return base_prompt + ", moody atmosphere"
-     elif detail_level == 2:
-         return base_prompt + ", minimal shadows"
-     else:
-         return base_prompt
-
- def get_output(image, output_type, style, detail_level, tags, model_choice):
-     # Note: style and tags are accepted but currently unused; for BLIP,
-     # "Tag List" has no branch and falls through to the fallback message.
-     if model_choice == "BLIP":
-         if output_type == "Detailed Description":
-             return generate_blip_caption(image)
-         elif output_type == "Short Caption":
-             return generate_blip_caption(image).split(",")[0]
-         elif output_type == "Model Training Data":
-             return generate_blip_caption(image).lower().replace(" ", "_")
-         elif output_type == "AI Prompt":
-             return generate_clip_prompt(image, detail_level)
-     elif model_choice == "CLIP":
-         return generate_clip_prompt(image, detail_level)
-     elif model_choice == "DeepDanbooru":
-         return danbooru_tagging(image)
-     elif model_choice == "NSFW Detector":
-         return "(Simulated NSFW classifier: result not implemented)"
-     return "[No valid selection made]"
-
- with gr.Blocks(css="style.css") as app:
-     gr.Markdown("# NSFW Image to Text Generator ✨")
-
-     with gr.Row():
-         with gr.Column():
-             img = gr.Image(type="filepath", label="Upload Image")
-             output_type = gr.Dropdown(
-                 ["Detailed Description", "Short Caption", "Tag List", "AI Prompt", "Model Training Data"],
-                 label="Output Type", value="Detailed Description")
-             style = gr.Dropdown(
-                 ["Neutral", "Erotic", "Artistic", "Technical", "Literary", "BDSM", "Fetish"],
-                 label="Style", value="Neutral")
-             detail = gr.Slider(1, 5, step=1, value=3, label="Detail Level")
-             tags = gr.Textbox(label="Custom Tags (comma separated)")
-             model_choice = gr.Radio(
-                 ["CLIP", "BLIP", "DeepDanbooru", "NSFW Detector"],
-                 label="AI Model", value="BLIP")
-             btn_generate = gr.Button("Generate Text")
-
-         with gr.Column():
-             output = gr.Textbox(label="Generated Output", lines=8)
-             # These buttons are not wired to any handler
-             gr.Button("Enhance")
-             gr.Button("Shorten")
-             gr.Button("Rewrite")
-
-     btn_generate.click(get_output,
-                        inputs=[img, output_type, style, detail, tags, model_choice],
-                        outputs=output)
-
-     with gr.Tab("Prompt Tools"):
-         prompt_input = gr.Textbox(label="Prompt Builder")
-         btn_optimize = gr.Button("Optimize Prompt")
-         btn_random = gr.Button("Randomize")
-         optimized_output = gr.Textbox(label="Optimized Prompt")
-         btn_optimize.click(lambda p: p + ", ultra detailed", inputs=prompt_input, outputs=optimized_output)
-         btn_random.click(lambda: "a cyberpunk alley at night", outputs=optimized_output)
-
-     with gr.Tab("Training Data"):
-         btn_tags = gr.Button("Generate Tags")
-         tags_out = gr.Textbox(label="Training Tags")
-         btn_tags.click(lambda: "1girl, solo, black bodysuit, sitting", outputs=tags_out)
-
-         caption_mode = gr.Dropdown(
-             ["Basic Caption", "Detailed Description", "Booru Style", "Natural Language"],
-             label="Caption Generation")
-         btn_caption = gr.Button("Generate Caption")
-         caption_out = gr.Textbox(label="Training Caption")
-         btn_caption.click(lambda mode: {
-             "Basic Caption": "A woman posing for a photo",
-             "Detailed Description": "A woman in a futuristic city wearing a sleek bodysuit.",
-             "Booru Style": "1girl, bodysuit, city, night",
-             "Natural Language": "She stands still beneath neon lights, calm yet focused."
-         }.get(mode, ""), inputs=caption_mode, outputs=caption_out)
-
-         trigger_word = gr.Textbox(label="Trigger Word")
-         trigger_class = gr.Textbox(label="Class")
-         btn_lora = gr.Button("Prepare LoRA Training Data")
-         lora_out = gr.Textbox(label="LoRA Output")
-         btn_lora.click(lambda t, c: f"LoRA: {t}, class: {c}", inputs=[trigger_word, trigger_class], outputs=lora_out)
-
- app.launch()
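
Two illustrative sketches follow; neither is part of the deleted file.

First, generate_clip_prompt above computed CLIP image features but never used them, returning a hardcoded template. A minimal sketch of how the same openai/clip-vit-base-patch32 checkpoint could actually drive prompt selection, scoring a few candidate prompts against the image; the pick_best_prompt helper and candidate list are hypothetical:

import torch
from PIL import Image
from transformers import CLIPProcessor, CLIPModel

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32").to(device)
clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

def pick_best_prompt(image_path, candidates):
    # Hypothetical helper: embed the image and all candidate prompts,
    # then return the candidate whose CLIP score against the image is highest.
    image = Image.open(image_path).convert("RGB")
    inputs = clip_processor(text=candidates, images=image,
                            return_tensors="pt", padding=True).to(device)
    with torch.no_grad():
        outputs = clip_model(**inputs)
    # logits_per_image has shape (1, len(candidates)): image-text similarity
    best = outputs.logits_per_image.argmax(dim=-1).item()
    return candidates[best]

Called as pick_best_prompt("photo.jpg", [base_prompt + s for s in ["", ", minimal shadows", ", moody atmosphere", ", cinematic lighting, ultra detailed, HDR"]]), it would pick the variant CLIP rates closest to the image, rather than keying off the detail slider alone.

Second, the "NSFW Detector" branch only returned a placeholder string. One way to back it with a real classifier, assuming the publicly shared Falconsai/nsfw_image_detection checkpoint (an assumption; the deleted file referenced no such model):

from transformers import pipeline

# Image-classification pipeline; returns e.g. [{"label": "nsfw", "score": 0.98}, ...]
nsfw_detector = pipeline("image-classification", model="Falconsai/nsfw_image_detection")

def detect_nsfw(image_path):
    scores = nsfw_detector(image_path)
    top = max(scores, key=lambda s: s["score"])
    return f"{top['label']} ({top['score']:.2f})"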