xcll committed on
Commit
b1531fb
·
verified ·
1 Parent(s): 3ff2267

Upload inference_RSICD_1.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. inference_RSICD_1.py +56 -0
inference_RSICD_1.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Batch inference over RSICD test captions with FLUX.1-dev plus LoRA checkpoints.

For each LoRA checkpoint (10k..50k training steps, tagged "1k".."5k" in the
output directory names), this script generates one 512x512 image per test
caption and saves it under a per-checkpoint result directory.  Images that
already exist on disk are skipped, so an interrupted run can be resumed.
"""
import gc
import json
import os
import random

import torch
from diffusers import FluxPipeline, FluxTransformer2DModel
from sd_embed.embedding_funcs import get_weighted_text_embeddings_flux1

# Base FLUX.1-dev snapshot and data locations (single source of truth for paths).
MODEL_DIR = "/data/xcl/flux/model/flux_dev/models--black-forest-labs--FLUX.1-dev/snapshots/0ef5fff789c832c5c7f4e127f94c8b54bbcced44"
CAPTIONS_PATH = "/data/xcl/dataSet/RSICD_1/test_captions.json"
RESULT_ROOT = "/data/xcl/sd-scripts/result/RSICD_test"
LORA_TEMPLATE = "/data/xcl/sd-scripts/output/RSICD_test/rsicd_test-step000{w}0000.safetensors"


def _build_pipeline(lora_path):
    """Build a bfloat16 FLUX pipeline with the given LoRA weights loaded.

    The transformer is loaded explicitly so it could be quantized before being
    handed to the pipeline (optional int8 weight-only quantization via
    ``torchao.quantization.quantize_`` was tried here and left disabled).
    """
    transformer = FluxTransformer2DModel.from_pretrained(
        MODEL_DIR,
        subfolder="transformer",
        torch_dtype=torch.bfloat16,
    )
    pipe = FluxPipeline.from_pretrained(
        MODEL_DIR,
        transformer=transformer,
        torch_dtype=torch.bfloat16,
    )
    # Save some VRAM by offloading the model to CPU.
    # Remove this if you have enough GPU power.
    pipe.enable_model_cpu_offload()
    pipe.load_lora_weights(lora_path)
    return pipe


def main():
    # The caption set is the same for every checkpoint — load it once,
    # not once per checkpoint as the loop would otherwise do.
    with open(CAPTIONS_PATH, "r", encoding="utf-8") as file:
        data = json.load(file)

    for w in ["1", "2", "3", "4", "5"]:
        out_dir = f"{RESULT_ROOT}/{w}k"
        os.makedirs(out_dir, exist_ok=True)

        pipe = _build_pipeline(LORA_TEMPLATE.format(w=w))

        for key, prompt in data.items():
            out_path = os.path.join(out_dir, f"{key}.png")
            if os.path.exists(out_path):
                # "跳过" = "skipping": image was generated by a previous run.
                print(f"跳过 {key}")
                continue

            # Weighted embeddings let prompts exceed the encoder token limit.
            prompt_embeds, pooled_prompt_embeds = get_weighted_text_embeddings_flux1(
                pipe=pipe,
                prompt=prompt,
            )
            image = pipe(
                prompt_embeds=prompt_embeds,
                pooled_prompt_embeds=pooled_prompt_embeds,
                width=512,
                height=512,
                num_inference_steps=28,
                guidance_scale=3.5,
                # Fresh random seed per image, as in the original run.
                generator=torch.Generator().manual_seed(random.randint(0, 2**32 - 1)),
            ).images[0]
            image.save(out_path)

        # Release the pipeline (and its transformer) before loading the
        # next checkpoint to keep peak GPU/CPU memory down.
        del pipe
        gc.collect()
        torch.cuda.empty_cache()


if __name__ == "__main__":
    main()