manbeast3b committed on
Commit
4584e29
·
verified ·
1 Parent(s): 1c6f3e8

Update src/pipeline.py

Browse files
Files changed (1) hide show
  1. src/pipeline.py +4 -8
src/pipeline.py CHANGED
@@ -23,7 +23,7 @@ def empty_cache():
23
  torch.cuda.empty_cache()
24
  torch.cuda.reset_max_memory_allocated()
25
  torch.cuda.reset_peak_memory_stats()
26
- print(f"Flush took: {time.time() - start}")
27
 
28
  def load_pipeline() -> Pipeline:
29
  empty_cache()
@@ -49,7 +49,7 @@ def load_pipeline() -> Pipeline:
49
  #quantize_(vae, int8_weight_only())
50
 
51
  model = FluxTransformer2DModel.from_pretrained(
52
- "/home/sandbox/.cache/huggingface/hub/models--RobertML--FLUX.1-schnell-int8wo/snapshots/307e0777d92df966a3c0f99f31a6ee8957a9857a", torch_dtype=dtype, use_safetensors=False
53
  )
54
  empty_cache()
55
  pipeline = DiffusionPipeline.from_pretrained(
@@ -72,10 +72,6 @@ from datetime import datetime
72
  @torch.inference_mode()
73
  def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image:
74
  empty_cache()
75
- try:
76
- generator = Generator(pipeline.device).manual_seed(request.seed)
77
- image=pipeline(request.prompt,generator=generator, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256, height=request.height, width=request.width, output_type="pil").images[0]
78
- except:
79
- image = img.open("./RobertML.png")
80
- pass
81
  return(image)
 
23
  torch.cuda.empty_cache()
24
  torch.cuda.reset_max_memory_allocated()
25
  torch.cuda.reset_peak_memory_stats()
26
+ # print(f"Flush took: {time.time() - start}")
27
 
28
  def load_pipeline() -> Pipeline:
29
  empty_cache()
 
49
  #quantize_(vae, int8_weight_only())
50
 
51
  model = FluxTransformer2DModel.from_pretrained(
52
+ "/root/.cache/huggingface/hub/models--RobertML--FLUX.1-schnell-int8wo/snapshots/307e0777d92df966a3c0f99f31a6ee8957a9857a", torch_dtype=dtype, use_safetensors=False
53
  )
54
  empty_cache()
55
  pipeline = DiffusionPipeline.from_pretrained(
 
72
@torch.inference_mode()
def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image:
    """Run one text-to-image generation pass for *request* and return the PIL image.

    The pipeline is assumed to be the object returned by load_pipeline();
    inference runs under torch.inference_mode(), so no autograd state is kept.
    """
    # Drop any cached CUDA allocations before the diffusion pass.
    empty_cache()

    # Seed a device-local RNG so the same request.seed reproduces the same image.
    rng = Generator(pipeline.device).manual_seed(request.seed)

    # guidance_scale=0.0 and 4 steps match the FLUX.1-schnell distilled setup;
    # NOTE(review): max_sequence_length=256 presumably matches the text encoder
    # used at load time — confirm against load_pipeline().
    result = pipeline(
        request.prompt,
        generator=rng,
        guidance_scale=0.0,
        num_inference_steps=4,
        max_sequence_length=256,
        height=request.height,
        width=request.width,
        output_type="pil",
    )
    return result.images[0]