Commit: Update src/pipeline.py (+2 −6) — explicitly seed the CUDA generator from the request and remove the bare `except:` / fallback-image path.
src/pipeline.py
CHANGED
|
@@ -61,10 +61,6 @@ def load_pipeline() -> Pipeline:
|
|
| 61 |
@torch.inference_mode()
|
| 62 |
def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image:
|
| 63 |
torch.cuda.reset_peak_memory_stats()
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
image=pipeline(request.prompt,generator=generator, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256, height=request.height, width=request.width, output_type="pil").images[0]
|
| 67 |
-
except:
|
| 68 |
-
image = img.open("./RobertML.png")
|
| 69 |
-
pass
|
| 70 |
return(image)
|
|
|
|
| 61 |
@torch.inference_mode()
def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image:
    """Run one seeded text-to-image generation and return the PIL image.

    Args:
        request: Carries the prompt, target height/width, and the RNG seed
            used to make the generation reproducible.
        pipeline: A loaded diffusion pipeline, presumably already resident
            on the GPU (set up by ``load_pipeline``).

    Returns:
        The first (and only) generated image, as a PIL ``Image``.
    """
    # Reset the CUDA peak-memory counter so callers can inspect the
    # memory high-water mark of this single inference call afterwards.
    torch.cuda.reset_peak_memory_stats()

    # Seed a CUDA generator so identical requests reproduce identical images.
    rng = Generator("cuda").manual_seed(request.seed)

    # NOTE(review): guidance_scale=0.0 with only 4 steps suggests a distilled
    # fast model (turbo/schnell-style) — confirm against load_pipeline().
    result = pipeline(
        request.prompt,
        generator=rng,
        guidance_scale=0.0,
        num_inference_steps=4,
        max_sequence_length=256,
        height=request.height,
        width=request.width,
        output_type="pil",
    )
    return result.images[0]
|