X-HighVoltage-X committed on
Commit
aa95e47
·
verified ·
1 Parent(s): 732c6e6

Upload 4 files

Browse files
Files changed (4) hide show
  1. app.py +280 -0
  2. loras.py +190 -0
  3. readme.md +13 -0
  4. requirements.txt +9 -0
app.py ADDED
@@ -0,0 +1,280 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import random
2
+
3
+ import gradio as gr
4
+ import numpy as np
5
+ import spaces
6
+ import torch
7
+ from diffusers import FluxFillPipeline
8
+ from loras import LoRA, loras
9
+ from PIL import Image
10
+
11
+ MAX_SEED = np.iinfo(np.int32).max
12
+
13
+ pipe = FluxFillPipeline.from_pretrained("black-forest-labs/FLUX.1-Fill-dev", torch_dtype=torch.bfloat16)
14
+
15
+ flux_keywords_available = ["IMG_1025.HEIC", "Selfie"]
16
+
17
+ # --- LATENT MANIPULATION FUNCTIONS ---
18
+ def pack_latents(latents, batch_size, num_channels, height, width):
19
+ latents = latents.view(batch_size, num_channels, height // 2, 2, width // 2, 2)
20
+ latents = latents.permute(0, 2, 4, 1, 3, 5)
21
+ latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels * 4)
22
+ return latents
23
+
24
+
25
def unpack_latents(latents, height, width, h_scale=2, w_scale=2):
    """Invert pack_latents: (B, seq, C_packed) -> (B, C_packed/(hs*ws), height, width).

    ``height``/``width`` are the latent-space dimensions; Flux packs 2x2
    patches, hence the default scale factors of 2.
    """
    batch_size, _, packed_channels = latents.shape
    channels = packed_channels // (h_scale * w_scale)
    grid = latents.view(batch_size, height // h_scale, width // w_scale, channels, h_scale, w_scale)
    grid = grid.permute(0, 3, 1, 4, 2, 5)
    return grid.reshape(batch_size, channels, height, width)
34
+
35
+
36
# --- CALLBACK (PRESERVED AREA + STEP CAPTURE) ---
def get_gradual_blend_callback(
    pipe, original_image, preserved_area_mask, total_steps, step_images_list, start_alpha=1.0, end_alpha=0.2
):
    """Build a per-step callback that blends preserved pixels back into the latents.

    The blend strength decays linearly from ``start_alpha`` to ``end_alpha``
    over the run, and a decoded preview is appended to ``step_images_list``
    every 5 steps (plus the final step).
    """
    device = pipe.device
    dtype = pipe.transformer.dtype

    with torch.no_grad():
        # Encode the original image into scaled VAE latents once, up front.
        pixels = np.array(original_image).transpose(2, 0, 1)
        img_tensor = (torch.from_numpy(pixels).float() / 127.5 - 1.0).unsqueeze(0).to(device, dtype)
        init_latents = pipe.vae.encode(img_tensor).latent_dist.sample()
        init_latents = (init_latents - pipe.vae.config.shift_factor) * pipe.vae.config.scaling_factor

        # Latent-space dimensions drive both packing calls below.
        _, _, h_latent, w_latent = init_latents.shape
        packed_init_latents = pack_latents(
            init_latents, batch_size=1, num_channels=16, height=h_latent, width=w_latent
        )

        # Downsample the keep-mask to latent resolution and pack it the same way.
        mask_np = np.array(preserved_area_mask.convert("L"))
        mask_tensor = (torch.from_numpy(mask_np).float() / 255.0).unsqueeze(0).unsqueeze(0).to(device, dtype)
        latent_preserved_mask = torch.nn.functional.interpolate(
            mask_tensor, size=(h_latent, w_latent), mode="nearest"
        )
        packed_preserved_mask = pack_latents(
            latent_preserved_mask, batch_size=1, num_channels=1, height=h_latent, width=w_latent
        )

    def callback_fn(pipe, step, timestep, callback_kwargs):
        latents = callback_kwargs["latents"]

        # A. Blend the preserved area back in, with linearly decaying strength.
        progress = step / max(1, total_steps - 1)
        current_alpha = start_alpha - (start_alpha - end_alpha) * progress
        # repeat(1, 1, 16): the packed mask carries 4 channels, latents 64.
        effective_mask_64 = (packed_preserved_mask * current_alpha).repeat(1, 1, 16)
        latents = (1 - effective_mask_64) * latents + effective_mask_64 * packed_init_latents

        # B. Decode a preview every 5 steps (kept sparse to save GPU memory).
        if step % 5 == 0 or step == total_steps - 1:
            with torch.no_grad():
                unpacked = unpack_latents(latents, h_latent, w_latent)
                unpacked = (unpacked / pipe.vae.config.scaling_factor) + pipe.vae.config.shift_factor
                decoded = pipe.vae.decode(unpacked.to(pipe.vae.dtype)).sample
                step_images_list.append(pipe.image_processor.postprocess(decoded, output_type="pil")[0])

        callback_kwargs["latents"] = latents
        return callback_kwargs

    return callback_fn
97
+
98
+
99
# --- LoRA's FUNCTIONS ---
def activate_loras(pipe: FluxFillPipeline, loras_with_weights: list[tuple[LoRA, float]]):
    """Load and enable the given LoRA adapters on the pipeline.

    Args:
        pipe: the FLUX fill pipeline to attach adapters to.
        loras_with_weights: (LoRA, strength) pairs; strengths are applied
            through ``set_adapters``. ``load_lora_weights`` itself has no
            per-adapter strength parameter, so no ``weight`` kwarg is passed.

    Returns:
        The same pipeline, with all adapters loaded and activated.
    """
    adapter_names = []
    adapter_weights = []
    for lora, weight in loras_with_weights:
        pipe.load_lora_weights(lora.id, adapter_name=lora.name)
        adapter_names.append(lora.name)
        adapter_weights.append(weight)
    pipe.set_adapters(adapter_names, adapter_weights=adapter_weights)
    return pipe
109
+
110
+
111
def deactivate_loras(pipe):
    """Unload every LoRA adapter from the pipeline and return it."""
    pipe.unload_lora_weights()
    return pipe
114
+
115
+
116
# --- GENERATION
def calculate_optimal_dimensions(image):
    """Scale the image size so the longer side is 1024, snapped down to multiples of 8.

    Returns a (width, height) tuple preserving the original aspect ratio.
    """
    FIXED_DIMENSION = 1024
    w0, h0 = image.size
    ratio = w0 / h0
    if ratio > 1:
        # Landscape: pin the width, derive the height.
        width = FIXED_DIMENSION
        height = round(FIXED_DIMENSION / ratio)
    else:
        # Portrait or square: pin the height, derive the width.
        height = FIXED_DIMENSION
        width = round(FIXED_DIMENSION * ratio)
    return (width // 8) * 8, (height // 8) * 8
126
+
127
+
128
@spaces.GPU(duration=60)
def inpaint(
    image,
    mask,
    preserved_area_mask=None,
    prompt: str = "",
    seed: int = 0,
    num_inference_steps: int = 28,
    guidance_scale: int = 50,
    strength: float = 1.0,
):
    """Inpaint ``image`` inside ``mask`` with FLUX.1-Fill.

    Returns (RGBA result, list of intermediate previews, prompt, seed).
    The preview list is empty unless ``preserved_area_mask`` is given.
    """
    image = image.convert("RGB")
    mask = mask.convert("L")
    width, height = calculate_optimal_dimensions(image)

    # Bring the input to the working resolution before encoding.
    resized_image = image.resize((width, height), Image.LANCZOS)

    pipe.to("cuda")

    # Wire up the gradual-blend callback only when a keep-mask was supplied.
    previews = []
    callback = None
    if preserved_area_mask is not None:
        resized_keep_mask = preserved_area_mask.resize((width, height), Image.NEAREST)
        callback = get_gradual_blend_callback(
            pipe, resized_image, resized_keep_mask, num_inference_steps, previews
        )

    result = pipe(
        image=resized_image,
        mask_image=mask.resize((width, height)),
        prompt=prompt,
        width=width,
        height=height,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        strength=strength,
        generator=torch.Generator().manual_seed(seed),
        callback_on_step_end=callback,
        callback_on_step_end_tensor_inputs=["latents"] if callback else None,
    ).images[0]

    return result.convert("RGBA"), previews, prompt, seed
172
+
173
+
174
def inpaint_api(
    image,
    mask,
    preserved_area_mask,
    prompt: str,
    seed: int,
    num_inference_steps: int,
    guidance_scale: int,
    strength: float,
    flux_keywords: list[str] | None = None,
    loras_selected: list[tuple[str, float]] | None = None,
):
    """Gradio entry point: resolve LoRA selections, build the final prompt, inpaint.

    Args:
        flux_keywords: checkbox choices prepended to the prompt.
        loras_selected: (display_name, weight) rows from the Dataframe;
            rows with a non-numeric or zero weight are skipped.

    Returns:
        Whatever ``inpaint`` returns: (result, previews, final prompt, seed).
    """
    flux_keywords = flux_keywords or []
    loras_selected = loras_selected or []

    # Resolve (display_name, weight) rows into LoRA objects with usable weights.
    selected_loras_with_weights = []
    for name, weight_value in loras_selected:
        try:
            weight = float(weight_value)
        except (TypeError, ValueError):
            # Skip rows whose weight cell is empty or not numeric.
            continue
        lora_obj = next((l for l in loras if l.display_name == name), None)
        if lora_obj and weight != 0.0:
            selected_loras_with_weights.append((lora_obj, weight))

    # Always reset adapters so a previous request's LoRAs don't leak into this one.
    deactivate_loras(pipe)
    if selected_loras_with_weights:
        activate_loras(pipe, selected_loras_with_weights)

    # Final prompt = flux keywords, then LoRA trigger words, then the user prompt.
    final_prompt = ""
    if flux_keywords:
        final_prompt += ", ".join(flux_keywords) + ", "
    for lora, _ in selected_loras_with_weights:
        if lora.keyword:
            final_prompt += (lora.keyword if isinstance(lora.keyword, str) else ", ".join(lora.keyword)) + ", "
    final_prompt += prompt

    # Seed slider uses -1 to mean "random".
    # NOTE(review): gr.Slider may deliver a float, in which case the isinstance
    # check makes every run use a random seed — confirm intended behavior.
    if not isinstance(seed, int) or seed < 0:
        seed = random.randint(0, MAX_SEED)

    return inpaint(
        image=image,
        mask=mask,
        preserved_area_mask=preserved_area_mask,
        prompt=final_prompt,
        seed=seed,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        strength=strength,
    )
224
+
225
+
226
# --- GRADIO UI ---
with gr.Blocks(title="FLUX.1 Fill dev + Area Preservation", theme=gr.themes.Soft()) as demo:
    with gr.Row():
        with gr.Column(scale=2):
            # Generation controls; seed -1 means "pick a random seed" (see inpaint_api).
            prompt_input = gr.Text(label="Prompt", lines=4, value="a 25 years old woman")
            seed_slider = gr.Slider(label="Seed", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
            num_inference_steps_input = gr.Number(label="Inference steps", value=40)
            guidance_scale_input = gr.Number(label="Guidance scale", value=30)
            strength_input = gr.Number(label="Strength", value=1.0, maximum=1.0)

            gr.Markdown("### Flux Keywords")
            flux_keywords_input = gr.CheckboxGroup(choices=flux_keywords_available, label="Flux Keywords")

            # One row per known LoRA; users set a non-zero weight to enable one.
            # NOTE(review): if `loras` were ever empty, loras_selected_input would be
            # undefined in the click() wiring below — safe today only because
            # loras.py always defines a non-empty list.
            if loras:
                gr.Markdown("### Available LoRAs")
                lora_names = [l.display_name for l in loras]
                loras_selected_input = gr.Dataframe(
                    type="array",
                    headers=["LoRA", "Weight"],
                    value=[[name, 0.0] for name in lora_names],
                    datatype=["str", "number"],
                    interactive=[False, True],
                    label="LoRA selection",
                )

        with gr.Column(scale=3):
            # Inputs: the original image plus the two masks (change vs. keep).
            image_input = gr.Image(label="Original Image", type="pil")
            mask_input = gr.Image(label="Inpaint Mask (Area to change)", type="pil")
            preserved_area_input = gr.Image(label="Preserved Area Mask (Area to keep)", type="pil")
            run_btn = gr.Button("Generate", variant="primary")

        with gr.Column(scale=3):
            # Outputs, in the same order as inpaint_api's return tuple mapping below.
            result_image = gr.Image(label="Result")
            used_prompt_box = gr.Text(label="Final Prompt")
            used_seed_box = gr.Number(label="Used Seed")
            steps_gallery = gr.Gallery(label="Evolution (Steps)", columns=3, preview=True)

    # Input order must match inpaint_api's positional parameters.
    run_btn.click(
        fn=inpaint_api,
        inputs=[
            image_input,
            mask_input,
            preserved_area_input,
            prompt_input,
            seed_slider,
            num_inference_steps_input,
            guidance_scale_input,
            strength_input,
            flux_keywords_input,
            loras_selected_input,
        ],
        outputs=[result_image, steps_gallery, used_prompt_box, used_seed_box],
    )

if __name__ == "__main__":
    demo.launch()
loras.py ADDED
@@ -0,0 +1,190 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from dataclasses import field
2
+ from typing import List
3
+
4
+ from pydantic import BaseModel
5
+
6
+
7
class LoRA(BaseModel):
    """Metadata describing one LoRA adapter selectable in the app."""

    # Hugging Face repo id passed to pipe.load_lora_weights.
    id: str
    # Adapter name registered with the pipeline.
    name: str
    # Name shown in the UI and used to look the LoRA up.
    display_name: str
    url: str | None = None
    # Trigger phrase prepended to the prompt when the LoRA is active.
    keyword: str | None = None
    # Pydantic deep-copies plain mutable defaults, so a bare [] is safe here;
    # dataclasses.field(default_factory=...) is not supported on BaseModel fields.
    all_keywords: List[str] = []
    # Free-form usage notes shown nowhere yet; kept for reference.
    note: str | None = None
16
+
17
+
18
# Registry of selectable LoRA adapters. Fixes vs. the original upload:
# two entries passed `notes=` (silently dropped by pydantic) instead of `note=`,
# the chinfixer note contained an invalid `\c` escape (now `\\c`, same runtime
# value), and the Super Realism note had broken quoting around "realistic".
loras = [
    LoRA(
        id="X-HighVoltage-X/Flux-Kontext-Makeup-remover",
        name="Flux Kontext Makeup remover",
        display_name="Flux Kontext Makeup remover v1.0",
        keyword="Remove makeup of this person",
        url="https://civitai.com/models/1859952/flux-kontext-makeup-remover",
        note=(
            "This model is a Flux Kontext LoRA trained with the AI-toolkit. "
            "It was trained on 70 image pairs, with around 80% of them featuring Asian subjects. "
            "However, since Kontext LoRA learns concepts rather than specific faces, "
            "it works well with various ethnicities. "
            "I recommend using a LoRA strength of 1 for best results."
        ),
    ),
    LoRA(
        id="black-forest-labs/FLUX.1-Canny-dev-lora",
        name="Canny dev LoRA",
        display_name="Canny dev LoRA",
        url="https://huggingface.co/black-forest-labs/FLUX.1-Canny-dev-lora",
    ),
    LoRA(
        id="strangerzonehf/Flux-Super-Realism-LoRA",
        name="Super Realism",
        display_name="Super Realism",
        keyword="Super Realism",
        url="https://huggingface.co/strangerzonehf/Flux-Super-Realism-LoRA",
        note=(
            "The trigger word is not mandatory; ensure that words like "
            '"realistic" and "realism" appear in the image description. '
            'The "super realism" trigger word should prompt an exact match '
            "to the reference image in the showcase."
        ),
    ),
    LoRA(
        id="ujouy/Amateur_Photography_FluxDev",
        name="Amateur Photography",
        display_name="Amateur Photography v6.0",
        url="https://civitai.com/models/652699/amateur-photography-flux-dev",
        note=(
            "Recommended Settings (v6)\n"
            "Distilled CFG Scale: 3.5\n"
            "Sampling method and Schedule type: DEIS with DDIM\n"
            "Steps: 20\n"
            "Resolution: 896x1152\n"
            "Hires fix model: 4x_NMKD-Superscale-SP_178000_G\n"
            "Steps: 10\n"
            "Denoise: 0.3\n"
            "Upscale by: 1.5\n"
            "Lora Weight: 0.8. You have to experiment based on your prompts\n"
        ),
    ),
    LoRA(
        id="VideoAditor/Flux-Lora-Realism",
        name="Flux Lora Realism",
        display_name="Flux Lora Realism",
        url="https://huggingface.co/VideoAditor/Flux-Lora-Realism",
    ),
    LoRA(
        id="strangerzonehf/Flux-SuperPortrait-v2-LoRA",
        name="Super Portrait v2",
        display_name="Super Portrait v2",
        keyword="Super Portrait v2",
        url="https://huggingface.co/strangerzonehf/Flux-SuperPortrait-v2-LoRA",
        note=(
            "Best Dimensions & Inference\n"
            "Dimension: 1280 x 832, Aspect Ratio: 3:2, Recommendation: Best\n"
            "Dimension: 1024 x 1024, Aspect Ratio: 1:1, Recommendation: Default\n"
            "Inference Range\n"
            "Recommended Inference Steps: 30–35"
        ),
    ),
    LoRA(
        id="fofr/kontext-make-person-real",
        name="kontext make person real",
        display_name="kontext make person real",
        keyword="make this person look real",
        url="https://huggingface.co/fofr/kontext-make-person-real",
    ),
    LoRA(
        id="prithivMLmods/Castor-Character-Polygon-Flux-LoRA",
        name="Castor Character Polygon Flux LoRA",
        display_name="Castor Character Polygon Flux LoRA",
        keyword="Create a hyper-realistic 3D polygon character portrait of",
        url="https://huggingface.co/prithivMLmods/Castor-Character-Polygon-Flux-LoRA",
        note=(
            "You should use 3D Polygon to trigger the image generation.\n"
            "You should use 3D Polygon Character to trigger the image generation.\n"
        ),
    ),
    LoRA(
        id="X-HighVoltage-X/chinfixer-2000",
        name="Chin Fixer 2000",
        display_name="Chin Fixer 2000 v3.0",
        keyword="ChinFixer-2000 style",
        url="https://civitai.com/models/775002/chin-fixer-2000",
        note=(
            "More trigger words:\n"
            "chin, cleft chin, jawbone, bottom of head\n"
            "Try with <lora:Flux\\chinfixer-2000.safetensors:1.0:1.0> at the end of prompt"
        ),
    ),
    LoRA(
        id="X-HighVoltage-X/cultures-flux-v3",
        name="better faces cultures v3",
        display_name="better faces cultures v3",
        url="https://civitai.com/models/119376?modelVersionId=1228102",
        note=(
            "include a diverse range of cultures and ethnicities.\n"
            "aborigines, african, arab, arctic, brazilian, chinese, egyptian, finish\n"
            "german, havaiian, indian, japanese, mongolian, russian, western.\n"
            "responds to age prompts like __yo for 20yo, 30yo, 40yo.\n"
            "Weight: start with 0.3"
        ),
    ),
    LoRA(
        id="X-HighVoltage-X/cultures-Portait-FLUX",
        name="better faces cultures FLUX Portait",
        display_name="better faces cultures FLUX Portait",
        url="https://civitai.com/models/119376?modelVersionId=779782",
        note=(
            "include a diverse range of cultures and ethnicities.\n"
            "aborigines, african, arab, arctic, brazilian, chinese, egyptian, finish\n"
            "german, havaiian, indian, japanese, mongolian, russian, western.\n"
            "responds to age prompts like __yo for 20yo, 30yo, 40yo.\n"
            "Weight: start with 0.3"
        ),
    ),
    LoRA(
        id="X-HighVoltage-X/Black-Hair-Blue-Eyes-Pale-Skin-v1.0",
        name="Black Hair Blue Eyes Pale Skin v1",
        display_name="Black Hair Blue Eyes Pale Skin v1",
        url="https://civitai.com/models/928269/black-hair-blue-eyes-pale-skin",
        keyword="BHBEPS",
    ),
    LoRA(
        id="X-HighVoltage-X/Long-hair-LoRA-V3",
        name="Long hair LoRA V3",
        display_name="Long hair LoRA V3",
        url="https://civitai.com/models/669029/long-hair-lora-flux",
    ),
    LoRA(
        id="X-HighVoltage-X/Long-Hair-beautiful-Blond-v1.0",
        name="Long Hair beautiful Blond v1",
        display_name="Long Hair beautiful Blond v1",
        url="https://civitai.com/models/1685872/long-hair-beautiful-blond",
        keyword="longhairblond1",
    ),
    LoRA(
        id="X-HighVoltage-X/Gabri3lla-TS-v1.0",
        name="Gabri3lla TS v1",
        display_name="Gabri3lla TS v1",
        url="https://civitai.com/models/1218749/gabri3lla-ts",
        keyword="G@bri3ll@ Dark hair",
    ),
    LoRA(
        id="X-HighVoltage-X/Nobody_11-Arabic-Female-Uncensored-v1.0",
        name="Nobody_11 - Arabic Female - Uncensored v1",
        display_name="Nobody_11 - Arabic Female - Uncensored v1",
        url="https://civitai.com/models/1762729/nobody11-arabic-female-uncensored-flux1d",
        keyword="a woman",
    ),
    LoRA(
        id="X-HighVoltage-X/No-Shine",
        name="No Shine",
        display_name="No Shine",
    ),
    LoRA(
        id="X-HighVoltage-X/sameface-fix-flux-lora",
        name="SameFace Fix Flux Lora",
        display_name="SameFace Fix Flux Lora",
        keyword="woman",
    ),
]
readme.md ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Flux.1 Fill Dev Inpainting Super Realism LoRA
3
+ emoji: 📉
4
+ colorFrom: green
5
+ colorTo: gray
6
+ sdk: gradio
7
+ sdk_version: 5.39.0
8
+ app_file: app.py
9
+ pinned: false
10
+ short_description: Flux.1-Fill-dev Inpainting with Super Realism LoRA
11
+ ---
12
+
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ transformers==4.49
2
+ diffusers==0.35.1
3
+ accelerate
4
+ peft
5
+ spaces
6
+ sentencepiece
7
+ safetensors
8
+ torch
9
+ scipy