fix: pass seed to generator
Browse files- inference.py +17 -16
inference.py
CHANGED
|
@@ -40,20 +40,7 @@ class DiffusionInference:
|
|
| 40 |
}
|
| 41 |
|
| 42 |
# Handle seed parameter
|
| 43 |
-
|
| 44 |
-
try:
|
| 45 |
-
# Convert to integer and add to params
|
| 46 |
-
generator = torch.Generator(device="cuda").manual_seed(seed)
|
| 47 |
-
except (ValueError, TypeError):
|
| 48 |
-
# Use random seed if conversion fails
|
| 49 |
-
random_seed = random.randint(0, 3999999999) # Max 32-bit integer
|
| 50 |
-
generator = torch.Generator(device="cuda").manual_seed(random_seed)
|
| 51 |
-
print(f"Warning: Invalid seed value: {seed}, using random seed {random_seed} instead")
|
| 52 |
-
else:
|
| 53 |
-
# Generate random seed when none is provided
|
| 54 |
-
random_seed = random.randint(0, 3999999999) # Max 32-bit integer
|
| 55 |
-
generator = torch.Generator(device="cuda").manual_seed(random_seed)
|
| 56 |
-
print(f"Using random seed: {random_seed}")
|
| 57 |
|
| 58 |
# Add negative prompt if provided
|
| 59 |
if negative_prompt is not None:
|
|
@@ -66,7 +53,7 @@ class DiffusionInference:
|
|
| 66 |
|
| 67 |
try:
|
| 68 |
# Call the API with all parameters as kwargs
|
| 69 |
-
image = self.run_text_to_image_pipeline(model,
|
| 70 |
return image
|
| 71 |
except Exception as e:
|
| 72 |
print(f"Error generating image: {e}")
|
|
@@ -163,7 +150,21 @@ class DiffusionInference:
|
|
| 163 |
print(f"Warning: Could not delete temporary file {temp_file}: {e}")
|
| 164 |
|
| 165 |
@spaces.GPU
|
| 166 |
-
def run_text_to_image_pipeline(self, model_name,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 167 |
pipeline = AutoPipelineForText2Image.from_pretrained(model_name, generator=generator, torch_dtype=torch.float16).to("cuda")
|
| 168 |
image = pipeline(**kwargs).images[0]
|
| 169 |
return image
|
|
|
|
| 40 |
}
|
| 41 |
|
| 42 |
# Handle seed parameter
|
| 43 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 44 |
|
| 45 |
# Add negative prompt if provided
|
| 46 |
if negative_prompt is not None:
|
|
|
|
| 53 |
|
| 54 |
try:
|
| 55 |
# Call the API with all parameters as kwargs
|
| 56 |
+
image = self.run_text_to_image_pipeline(model, seed, **params)
|
| 57 |
return image
|
| 58 |
except Exception as e:
|
| 59 |
print(f"Error generating image: {e}")
|
|
|
|
| 150 |
print(f"Warning: Could not delete temporary file {temp_file}: {e}")
|
| 151 |
|
| 152 |
@spaces.GPU
def run_text_to_image_pipeline(self, model_name, seed, **kwargs):
    """Run a text-to-image diffusion pipeline on CUDA and return one image.

    Args:
        model_name: Model id or local path forwarded to
            ``AutoPipelineForText2Image.from_pretrained``.
        seed: Optional RNG seed. A non-integer value falls back to a random
            seed (with a printed warning); ``None`` draws a random seed and
            prints it so the run can be reproduced.
        **kwargs: Forwarded verbatim to the pipeline call (prompt,
            negative_prompt, inference settings, ...).

    Returns:
        The first image produced by the pipeline (``.images[0]``).
    """
    if seed is not None:
        try:
            # Seed a CUDA generator for reproducible sampling.
            generator = torch.Generator(device="cuda").manual_seed(seed)
        except (ValueError, TypeError):
            # Provided seed is unusable — fall back to a random one.
            random_seed = random.randint(0, 3999999999)  # fits in 32 bits
            generator = torch.Generator(device="cuda").manual_seed(random_seed)
            print(f"Warning: Invalid seed value: {seed}, using random seed {random_seed} instead")
    else:
        # No seed supplied: draw one and report it for reproducibility.
        random_seed = random.randint(0, 3999999999)  # fits in 32 bits
        generator = torch.Generator(device="cuda").manual_seed(random_seed)
        print(f"Using random seed: {random_seed}")
    # NOTE: from_pretrained does not accept a `generator` argument; passing
    # it there silently left the seed unused. The generator belongs on the
    # pipeline __call__, which is where diffusers consumes it for sampling.
    pipeline = AutoPipelineForText2Image.from_pretrained(
        model_name, torch_dtype=torch.float16
    ).to("cuda")
    image = pipeline(generator=generator, **kwargs).images[0]
    return image
|