Update src/pipeline.py
Browse files- src/pipeline.py +2 -6
src/pipeline.py
CHANGED
|
@@ -67,10 +67,6 @@ def load_pipeline() -> Pipeline:
|
|
| 67 |
@torch.inference_mode()
|
| 68 |
def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image:
|
| 69 |
torch.cuda.reset_peak_memory_stats()
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
image=pipeline(request.prompt,generator=generator, guidance_scale=0.1, num_inference_steps=4, max_sequence_length=256, height=request.height, width=request.width, output_type="pil").images[0]
|
| 73 |
-
except:
|
| 74 |
-
image = img.open("./RobertML.png")
|
| 75 |
-
pass
|
| 76 |
return(image)
|
|
|
|
| 67 |
@torch.inference_mode()
def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image:
    """Run one seeded text-to-image generation pass and return the PIL image.

    Resets the CUDA peak-memory counter first (presumably so a caller can
    measure this request's peak usage afterwards — TODO confirm), then runs
    the pipeline deterministically from ``request.seed``.

    Args:
        request: Carries ``prompt``, ``seed``, ``height`` and ``width``.
        pipeline: A loaded diffusion pipeline (see ``load_pipeline``).

    Returns:
        The first generated image, as a PIL ``Image``.
    """
    torch.cuda.reset_peak_memory_stats()

    # Seed a CUDA RNG so the same request always yields the same image.
    rng = Generator("cuda").manual_seed(request.seed)

    result = pipeline(
        request.prompt,
        generator=rng,
        guidance_scale=0.1,
        num_inference_steps=4,
        max_sequence_length=256,
        height=request.height,
        width=request.width,
        output_type="pil",
    )
    return result.images[0]
|