Spaces: Running on Zero
Update app_v4.py
app_v4.py
CHANGED
@@ -17,6 +17,7 @@ from huggingface_hub import CommitScheduler, HfApi, logging
 from debug import log_params, scheduler, save_image
 logging.set_verbosity_debug()
 from model_loader import safe_model_load
+from huggingface_hub.utils._runtime import dump_environment_info

 def hello(profile: gr.OAuthProfile | None) -> str:
     if profile is None:
@@ -55,8 +56,12 @@ pipe = FluxControlNetPipeline.from_pretrained(
 )
 pipe.to("cuda")

+try:
+    dump_environment_info()
+except Exception as e:
+    print(f"Failed to dump env info: {e}")

-@spaces.GPU(duration=
+@spaces.GPU(duration=6, progress=gr.Progress(track_tqdm=True))
 @torch.no_grad()
 def generate_image(prompt, scale, steps, control_image, controlnet_conditioning_scale, guidance_scale, seed, guidance_end):
     generator = torch.Generator().manual_seed(seed)
@@ -222,11 +227,13 @@ with gr.Blocks(title="FLUX Turbo Upscaler", fill_height=True) as demo:
     msg1 = gr.Markdown()
     try_btn = gr.LoginButton()
     try:
-
+        x_ip_token = request.headers['x-ip-token']
+        client = Client("LPX55/zerogpu-experiments", hf_token=huggingface_token, headers={"x-ip-token": x_ip_token})
         cresult = client.predict(
             n=3,
             api_name="/predict"
         )
+        print(f"X TOKEN: {x_ip_token}")
         print(cresult)
     except:
         print("Guess we're just going to have to pretend that Spaces have been broken for almost a year now..")
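The first two hunks import dump_environment_info from the private huggingface_hub.utils._runtime module and call it once after the pipeline moves to CUDA, wrapped in try/except so a diagnostics failure cannot crash the Space at startup. A minimal sketch of the same idea, assuming the public re-export in huggingface_hub.utils (the same helper that backs `huggingface-cli env`):

# Log the runtime environment once at startup, without importing from a
# private module.
from huggingface_hub.utils import dump_environment_info

try:
    # Prints huggingface_hub version, Python, OS, torch availability, etc.
    dump_environment_info()
except Exception as e:
    # Diagnostics must never take the app down.
    print(f"Failed to dump env info: {e}")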
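The decorator change pins the ZeroGPU lease for generate_image to six seconds per call; shorter leases get better queue priority, but the call is aborted if it overruns. Passing progress=gr.Progress(track_tqdm=True) to spaces.GPU itself does not appear in the documented decorator signature, so the sketch below keeps the progress object where Gradio documents it, as a default argument of the handler (`pipe` stands in for the FluxControlNetPipeline built earlier in app_v4.py):

import gradio as gr
import spaces
import torch

@spaces.GPU(duration=6)  # request a GPU lease of ~6 s per call
@torch.no_grad()         # inference only; skip autograd bookkeeping
def generate_image(prompt, seed, progress=gr.Progress(track_tqdm=True)):
    # track_tqdm=True mirrors the pipeline's tqdm bars into the Gradio
    # progress widget while the GPU lease is active.
    generator = torch.Generator().manual_seed(seed)
    return pipe(prompt=prompt, generator=generator).images[0]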
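The last hunk forwards the caller's x-ip-token header to a downstream Space via gradio_client, the documented way to have ZeroGPU bill GPU quota to the end user rather than to the Space owner. As written, though, `request` is only injected by Gradio into event handlers that declare a gr.Request parameter, not at Blocks-construction time, so this block will typically raise a NameError and fall through to the bare except. A sketch of the handler-side pattern, reusing the Space name and headers from the commit (`huggingface_token` is assumed to be defined elsewhere in the app):

from gradio_client import Client
import gradio as gr

def call_downstream(request: gr.Request):
    # Gradio fills `request` with the incoming HTTP request; the
    # x-ip-token header identifies the end user to ZeroGPU.
    x_ip_token = request.headers["x-ip-token"]
    client = Client(
        "LPX55/zerogpu-experiments",
        hf_token=huggingface_token,
        headers={"x-ip-token": x_ip_token},  # bill quota to the caller
    )
    return client.predict(n=3, api_name="/predict")

Wiring this to a hypothetical gr.Button via .click(call_downstream, outputs=msg1) would then run the downstream call with each user's own token.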