techyygarry committed on
Commit
fb7c3bf
·
verified ·
1 Parent(s): b2d7687

Upload 4 files

Browse files
Files changed (4) hide show
  1. LICENSE +10 -0
  2. README.md +41 -11
  3. app.py +87 -148
  4. requirements.txt +3 -4
LICENSE ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions...
8
+
9
+ (Full license text is truncated here, please include the full version from:
10
+ https://www.apache.org/licenses/LICENSE-2.0.txt)
README.md CHANGED
@@ -1,14 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
- title: GjnxGarry2
3
- emoji: 🖼
4
- colorFrom: purple
5
- colorTo: red
6
- sdk: gradio
7
- sdk_version: 5.25.2
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- short_description: Lora
 
 
 
 
 
 
 
 
 
 
12
  ---
13
 
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
1
+ # GNX Flux LoRA Inference (ComfyUI via Gradio)
2
+
3
+ This Hugging Face Space runs a FLUX.1 [dev] LoRA model trained by @gauravjuneja using the ComfyUI framework.
4
+
5
+ ### ✨ Features
6
+ - Custom prompt input
7
+ - Runs locally with preloaded LoRA and UNet model
8
+ - Inference pipeline powered by ComfyUI
9
+
10
+ ### 🔧 Usage
11
+ 1. Type a prompt like:
12
+ ```
13
+ gjnx is driving a Ferrari on a road in Germany
14
+ ```
15
+ 2. Wait for the CPU-based inference to return a generated image.
16
+
17
  ---
18
+
19
+ ### 📁 Model Structure
20
+
21
+ This Space uses the following models:
22
+ - `flux1-dev.sft` — UNet
23
+ - `clip_l.safetensors` — CLIP-1
24
+ - `t5xxl_fp16.safetensors` — CLIP-2
25
+ - `ae.safetensors` — VAE
26
+ - `gauravjuneja4.safetensors` — Trained LoRA
27
+
28
+ Please ensure you have the proper usage rights for these model files.
29
+
30
+ ---
31
+
32
+ ### 📜 License
33
+
34
+ Code: [Apache 2.0](LICENSE)
35
+ LoRA Weights: Provided for educational/research use
36
+ Base Models: Original authors’ licenses apply (e.g., CreativeML Open RAIL-M if SD-derived)
37
+
38
  ---
39
 
40
+ ### 🚀 Deployed With
41
+
42
+ - [ComfyUI](https://github.com/comfyanonymous/ComfyUI)
43
+ - [Gradio](https://www.gradio.app/)
44
+ - [Hugging Face Spaces](https://huggingface.co/spaces)
app.py CHANGED
@@ -1,154 +1,93 @@
1
- import gradio as gr
2
- import numpy as np
3
- import random
4
 
5
- # import spaces #[uncomment to use ZeroGPU]
6
- from diffusers import DiffusionPipeline
7
  import torch
 
8
 
9
- device = "cuda" if torch.cuda.is_available() else "cpu"
10
- model_repo_id = "stabilityai/sdxl-turbo" # Replace to the model you would like to use
11
-
12
- if torch.cuda.is_available():
13
- torch_dtype = torch.float16
14
- else:
15
- torch_dtype = torch.float32
16
-
17
- pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
18
- pipe = pipe.to(device)
19
-
20
- MAX_SEED = np.iinfo(np.int32).max
21
- MAX_IMAGE_SIZE = 1024
22
-
23
-
24
- # @spaces.GPU #[uncomment to use ZeroGPU]
25
- def infer(
26
- prompt,
27
- negative_prompt,
28
- seed,
29
- randomize_seed,
30
- width,
31
- height,
32
- guidance_scale,
33
- num_inference_steps,
34
- progress=gr.Progress(track_tqdm=True),
35
- ):
36
- if randomize_seed:
37
- seed = random.randint(0, MAX_SEED)
38
-
39
- generator = torch.Generator().manual_seed(seed)
40
-
41
- image = pipe(
42
- prompt=prompt,
43
- negative_prompt=negative_prompt,
44
- guidance_scale=guidance_scale,
45
- num_inference_steps=num_inference_steps,
46
- width=width,
47
- height=height,
48
- generator=generator,
49
- ).images[0]
50
-
51
- return image, seed
52
-
53
-
54
- examples = [
55
- "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
56
- "An astronaut riding a green horse",
57
- "A delicious ceviche cheesecake slice",
58
- ]
59
-
60
- css = """
61
- #col-container {
62
- margin: 0 auto;
63
- max-width: 640px;
64
- }
65
- """
66
-
67
- with gr.Blocks(css=css) as demo:
68
- with gr.Column(elem_id="col-container"):
69
- gr.Markdown(" # Text-to-Image Gradio Template")
70
-
71
- with gr.Row():
72
- prompt = gr.Text(
73
- label="Prompt",
74
- show_label=False,
75
- max_lines=1,
76
- placeholder="Enter your prompt",
77
- container=False,
78
- )
79
-
80
- run_button = gr.Button("Run", scale=0, variant="primary")
81
-
82
- result = gr.Image(label="Result", show_label=False)
83
-
84
- with gr.Accordion("Advanced Settings", open=False):
85
- negative_prompt = gr.Text(
86
- label="Negative prompt",
87
- max_lines=1,
88
- placeholder="Enter a negative prompt",
89
- visible=False,
90
- )
91
-
92
- seed = gr.Slider(
93
- label="Seed",
94
- minimum=0,
95
- maximum=MAX_SEED,
96
- step=1,
97
- value=0,
98
- )
99
-
100
- randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
101
-
102
- with gr.Row():
103
- width = gr.Slider(
104
- label="Width",
105
- minimum=256,
106
- maximum=MAX_IMAGE_SIZE,
107
- step=32,
108
- value=1024, # Replace with defaults that work for your model
109
- )
110
-
111
- height = gr.Slider(
112
- label="Height",
113
- minimum=256,
114
- maximum=MAX_IMAGE_SIZE,
115
- step=32,
116
- value=1024, # Replace with defaults that work for your model
117
- )
118
-
119
- with gr.Row():
120
- guidance_scale = gr.Slider(
121
- label="Guidance scale",
122
- minimum=0.0,
123
- maximum=10.0,
124
- step=0.1,
125
- value=0.0, # Replace with defaults that work for your model
126
- )
127
-
128
- num_inference_steps = gr.Slider(
129
- label="Number of inference steps",
130
- minimum=1,
131
- maximum=50,
132
- step=1,
133
- value=2, # Replace with defaults that work for your model
134
- )
135
-
136
- gr.Examples(examples=examples, inputs=[prompt])
137
- gr.on(
138
- triggers=[run_button.click, prompt.submit],
139
- fn=infer,
140
- inputs=[
141
- prompt,
142
- negative_prompt,
143
- seed,
144
- randomize_seed,
145
- width,
146
- height,
147
- guidance_scale,
148
- num_inference_steps,
149
- ],
150
- outputs=[result, seed],
151
- )
152
 
153
  if __name__ == "__main__":
154
  demo.launch()
 
 
 
 
1
 
2
# Stdlib / third-party imports plus the project's ComfyUI wrapper helpers.
import gradio as gr
from gnx_flux_lora import import_custom_nodes, NODE_CLASS_MAPPINGS, get_value_at_index
import torch
import random

# Initialize once (model loading outside inference function for Hugging Face ZeroGPU)
import_custom_nodes()

# Load the Flux UNet checkpoint. NOTE(review): ".sft" is an unusual
# extension — presumably a safetensors file; confirm the filename exists
# in the Space's model directory.
unetloader = NODE_CLASS_MAPPINGS["UNETLoader"]()
unetloader_3 = unetloader.load_unet(
    unet_name="flux1-dev.sft", weight_dtype="default"
)

# Load both text encoders (CLIP-L + T5-XXL) in "flux" mode.
dualcliploader = NODE_CLASS_MAPPINGS["DualCLIPLoader"]()
dualcliploader_8 = dualcliploader.load_clip(
    clip_name1="clip_l.safetensors",
    clip_name2="t5xxl_fp16.safetensors",
    type="flux",
    device="default",
)

# Patch the UNet and CLIP with the trained LoRA at full strength.
# run_inference below reads loraloader_12 for both the patched model
# (index 0) and the patched CLIP (index 1).
loraloader = NODE_CLASS_MAPPINGS["LoraLoader"]()
loraloader_12 = loraloader.load_lora(
    lora_name="gauravjuneja4/gauravjuneja4.safetensors",
    strength_model=1,
    strength_clip=1,
    model=get_value_at_index(unetloader_3, 0),
    clip=get_value_at_index(dualcliploader_8, 0),
)

# VAE used to decode latents back into images.
vaeloader = NODE_CLASS_MAPPINGS["VAELoader"]()
vae_9 = vaeloader.load_vae(vae_name="ae.safetensors")

# Instantiate the remaining graph nodes once; they are reused on every
# call to run_inference.
cliptextencode = NODE_CLASS_MAPPINGS["CLIPTextEncode"]()
fluxguidance = NODE_CLASS_MAPPINGS["FluxGuidance"]()
ksampler = NODE_CLASS_MAPPINGS["KSampler"]()
vaedecode = NODE_CLASS_MAPPINGS["VAEDecode"]()
emptylatentimage = NODE_CLASS_MAPPINGS["EmptyLatentImage"]()
+
41
def run_inference(prompt: str):
    """Generate one 1024x1024 image from *prompt* with the preloaded graph.

    Runs the ComfyUI pipeline (text encode -> Flux guidance -> KSampler ->
    VAE decode) under ``torch.inference_mode()`` and returns the first
    decoded image output. A fresh random seed is drawn on every call, so
    results are non-deterministic by design.
    """
    with torch.inference_mode():
        # Encode the user prompt with the LoRA-patched CLIP stack
        # (index 1 of the LoraLoader result is the patched CLIP).
        positive = cliptextencode.encode(
            text=prompt,
            clip=get_value_at_index(loraloader_12, 1),
        )

        # Empty negative conditioning; Flux runs with cfg=1 so this is
        # effectively unused, but KSampler still requires it.
        negative = cliptextencode.encode(
            text="",
            clip=get_value_at_index(loraloader_12, 1),
        )

        latent = emptylatentimage.generate(
            width=1024, height=1024, batch_size=1
        )

        guided = fluxguidance.append(
            guidance=3.5, conditioning=get_value_at_index(positive, 0)
        )

        sample = ksampler.sample(
            # Fix: randint's bounds are inclusive, so randint(1, 2**64)
            # could return 2**64, overflowing the unsigned 64-bit seed.
            # The valid range is 0 .. 2**64 - 1.
            seed=random.randint(0, 2**64 - 1),
            steps=20,
            cfg=1,
            sampler_name="euler",
            scheduler="simple",
            denoise=1,
            model=get_value_at_index(loraloader_12, 0),
            positive=get_value_at_index(guided, 0),
            negative=get_value_at_index(negative, 0),
            latent_image=get_value_at_index(latent, 0),
        )

        decoded = vaedecode.decode(
            samples=get_value_at_index(sample, 0),
            vae=get_value_at_index(vae_9, 0),
        )

        # First output of the decode node is the image tensor/batch.
        return get_value_at_index(decoded, 0)
82
+
83
# Gradio UI: single prompt textbox in, generated image out.
# run_inference does all of the work; this just wires it to the web form.
demo = gr.Interface(
    fn=run_inference,
    inputs=gr.Textbox(label="Prompt", placeholder="e.g. gjnx is driving ferrari on a road in germany"),
    outputs=gr.Image(label="Generated Image"),
    title="GNX Flux LoRA Generator",
    description="Enter a prompt using your trained LoRA with Flux .1 Dev."
)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
91
 
92
  if __name__ == "__main__":
93
  demo.launch()
requirements.txt CHANGED
@@ -1,6 +1,5 @@
1
- accelerate
2
- diffusers
3
- invisible_watermark
4
  torch
 
 
5
  transformers
6
- xformers
 
 
 
 
1
  torch
2
+ gradio
3
+ comfyui
4
  transformers
5
+ diffusers