Alexander Bagus committed on
Commit
2ef6e7d
·
1 Parent(s): 7d91422
Files changed (2) hide show
  1. app.py +33 -32
  2. requirements.txt +3 -1
app.py CHANGED
@@ -21,6 +21,7 @@ text_encoder_2 = AutoModel.from_pretrained(model_path, subfolder="text_encoder_2
21
  pipe = NewbiePipeline.from_pretrained(model_path, text_encoder_2=text_encoder_2, torch_dtype=torch.bfloat16)
22
  del text_encoder_2
23
 
 
24
  # pipe = NewbiePipeline.from_pretrained(
25
  # MODEL_REPO,
26
  # torch_dtype=torch.bfloat16,
@@ -32,38 +33,38 @@ del text_encoder_2
32
  # polished_prompt = polish_prompt(prompt)
33
  # return polished_prompt, True
34
 
35
- # @spaces.GPU
36
- # def inference(
37
- # prompt,
38
- # negative_prompt="blurry ugly bad",
39
- # width=1024,
40
- # height=1024,
41
- # seed=42,
42
- # randomize_seed=True,
43
- # guidance_scale=1.5,
44
- # num_inference_steps=8,
45
- # progress=gr.Progress(track_tqdm=True),
46
- # ):
47
- # timestamp = time.time()
48
- # print(f"timestamp: {timestamp}")
49
-
50
-
51
- # # generation
52
- # if randomize_seed: seed = random.randint(0, MAX_SEED)
53
- # generator = torch.Generator().manual_seed(seed)
54
-
55
- # image = pipe(
56
- # prompt= prompt,
57
- # negative_prompt = negative_prompt,
58
- # width=width,
59
- # height=height,
60
- # generator=generator,
61
- # guidance_scale=guidance_scale,
62
- # num_inference_steps=num_inference_steps,
63
- # enable_prompt_rewrite= False
64
- # ).images[0]
65
-
66
- # return image, seed
67
 
68
 
69
  def read_file(path: str) -> str:
 
21
  pipe = NewbiePipeline.from_pretrained(model_path, text_encoder_2=text_encoder_2, torch_dtype=torch.bfloat16)
22
  del text_encoder_2
23
 
24
+
25
  # pipe = NewbiePipeline.from_pretrained(
26
  # MODEL_REPO,
27
  # torch_dtype=torch.bfloat16,
 
33
  # polished_prompt = polish_prompt(prompt)
34
  # return polished_prompt, True
35
 
36
@spaces.GPU
def inference(
    prompt,
    negative_prompt="blurry ugly bad",
    width=1024,
    height=1024,
    seed=42,
    randomize_seed=True,
    guidance_scale=1.5,
    num_inference_steps=8,
    progress=gr.Progress(track_tqdm=True),
):
    """Run the module-level diffusion pipeline once and return (image, seed).

    When *randomize_seed* is true, *seed* is replaced with a fresh random
    draw in [0, MAX_SEED] before the torch generator is seeded, so repeated
    calls produce different images; the seed actually used is returned so
    the caller can reproduce the result.

    The *progress* parameter is only there so Gradio tracks tqdm progress
    bars emitted by the pipeline; it is not read in the body.
    """
    # Log the invocation time — handy when watching the Space's console.
    stamp = time.time()
    print(f"timestamp: {stamp}")

    # Resolve the effective seed, then build a deterministic generator.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    rng = torch.Generator().manual_seed(seed)

    # Single pipeline call; prompt rewriting is disabled explicitly.
    result = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        width=width,
        height=height,
        generator=rng,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        enable_prompt_rewrite=False,
    )
    return result.images[0], seed
68
 
69
 
70
  def read_file(path: str) -> str:
requirements.txt CHANGED
@@ -4,4 +4,6 @@ transformers
4
  accelerate
5
  spaces
6
  flash-attn
7
- git+https://github.com/Disty0/diffusers
 
 
 
4
  accelerate
5
  spaces
6
  flash-attn
7
+ git+https://github.com/Disty0/diffusers
8
+ timm
9
+ torchvision