DreamingOracle committed on
Commit
3dc4c5d
·
verified ·
1 Parent(s): 8c76a12

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -10
app.py CHANGED
@@ -11,16 +11,16 @@ from PIL import Image, ImageFilter, ImageOps
11
  from huggingface_hub import login, hf_hub_download
12
  import os
13
 
14
- # Log in to Hugging Face for private repo access
15
  if "HF_TOKEN" in os.environ:
16
  login(os.environ["HF_TOKEN"])
17
  else:
18
  raise ValueError("HF_TOKEN not found in environment variables. Please set it in Space settings.")
19
 
20
  device = "cuda" if torch.cuda.is_available() else "cpu"
21
- # Download the private model file
22
- repo_id = "DreamingOracle/Quagmaform_alpha-1" # Your private repo
23
- filename = "DPS_Quagmaform_Alpha1.safetensors" # Your model file
24
  model_path = hf_hub_download(repo_id=repo_id, filename=filename)
25
  if torch.cuda.is_available():
26
  torch_dtype = torch.float16
@@ -129,8 +129,8 @@ def infer(
129
 
130
  examples = [
131
  "photorealistic portrait of a young woman, cinematic rim lighting, soft golden hour backlight, detailed skin pores, realistic eyelashes, 85mm lens, shallow depth of field, ultra-detailed, high dynamic range, film grain, detailed, 8k",
132
- "full body portrait of a futuristic armored soldier, worn brushed metal armor with neon blue accents, realistic cloth under-armor, weathering and scratches, volumetric rim light, cinematic pose, high detail, photoreal",
133
- "neon cyberpunk street at night, wet pavement reflecting lights, pedestrians with umbrellas, dense signage in the distance, cinematic composition, realistic depth, crisp details, atmospheric fog, long lens compression",]
134
 
135
  css = """#col-container { margin: 0 auto; max-width: 640px;}"""
136
 
@@ -153,17 +153,17 @@ with gr.Blocks(css=css) as demo:
153
  randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
154
  with gr.Row():
155
  width = gr.Slider(
156
- label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=768, # Replace with defaults that work for your model
157
  )
158
  height = gr.Slider(
159
- label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024, # Replace with defaults that work for your model
160
  )
161
  with gr.Row():
162
  guidance_scale = gr.Slider(
163
- label="Guidance scale", minimum=0.0, maximum=10.0, step=0.1, value=5, # Replace with defaults that work for your model
164
  )
165
  num_inference_steps = gr.Slider(
166
- label="Number of inference steps", minimum=1, maximum=50, step=1, value=22, # Replace with defaults that work for your model
167
  )
168
  scheduler = gr.Dropdown(
169
  label="Sampler/Scheduler",
 
11
  from huggingface_hub import login, hf_hub_download
12
  import os
13
 
14
+ # Log auth config
15
  if "HF_TOKEN" in os.environ:
16
  login(os.environ["HF_TOKEN"])
17
  else:
18
  raise ValueError("HF_TOKEN not found in environment variables. Please set it in Space settings.")
19
 
20
  device = "cuda" if torch.cuda.is_available() else "cpu"
21
+ # dl
22
+ repo_id = "DreamingOracle/Quagmaform_alpha-1" # dl proprietary
23
+ filename = "DPS_Quagmaform_Alpha1.safetensors" # dl from source proprietary
24
  model_path = hf_hub_download(repo_id=repo_id, filename=filename)
25
  if torch.cuda.is_available():
26
  torch_dtype = torch.float16
 
129
 
130
  examples = [
131
  "photorealistic portrait of a young woman, cinematic rim lighting, soft golden hour backlight, detailed skin pores, realistic eyelashes, 85mm lens, shallow depth of field, ultra-detailed, high dynamic range, film grain, detailed, 8k",
132
+ "head helmet portrait of a futuristic armored soldier, worn brushed metal armor with neon blue accents, realistic cloth under-armor, weathering and scratches, volumetric rim light, cinematic pose, high detail, photoreal",
133
+ "silver bars stacked in a metal case, neon cyberpunk theme, rainbow glare effect, cinematic composition, realistic depth, crisp details",]
134
 
135
  css = """#col-container { margin: 0 auto; max-width: 640px;}"""
136
 
 
153
  randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
154
  with gr.Row():
155
  width = gr.Slider(
156
+ label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=768,
157
  )
158
  height = gr.Slider(
159
+ label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024,
160
  )
161
  with gr.Row():
162
  guidance_scale = gr.Slider(
163
+ label="Guidance scale", minimum=0.0, maximum=10.0, step=0.1, value=5,
164
  )
165
  num_inference_steps = gr.Slider(
166
+ label="Number of inference steps", minimum=1, maximum=50, step=1, value=22,
167
  )
168
  scheduler = gr.Dropdown(
169
  label="Sampler/Scheduler",