Spaces:
Running
on
Zero
Running
on
Zero
minor: tweaks
Browse files
app_v4.py
CHANGED
|
@@ -13,18 +13,13 @@ from diffusers.utils import load_image
|
|
| 13 |
from PIL import Image
|
| 14 |
from threading import Thread
|
| 15 |
from typing import Generator
|
| 16 |
-
from huggingface_hub import CommitScheduler, HfApi
|
| 17 |
from debug import log_params, scheduler, save_image
|
| 18 |
-
logging.set_verbosity_debug()
|
| 19 |
-
from model_loader import safe_model_load
|
| 20 |
from huggingface_hub.utils._runtime import dump_environment_info
|
| 21 |
|
| 22 |
-
def hello(profile: gr.OAuthProfile | None) -> str:
|
| 23 |
-
if profile is None:
|
| 24 |
-
return "Hello guest! There is a bug with HF ZeroGPUs that are affecting some usage on certain spaces. Testing out some possible solutions."
|
| 25 |
-
return f"You are logged in as {profile.name}. If you run into incorrect messages about ZeroGPU runtime credits being out, PLEASE give me a heads up so I can investigate further."
|
| 26 |
-
|
| 27 |
|
|
|
|
|
|
|
| 28 |
|
| 29 |
# Ensure device is set
|
| 30 |
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
|
|
@@ -295,5 +290,12 @@ with gr.Blocks(title="FLUX Turbo Upscaler", fill_height=True) as demo:
|
|
| 295 |
inputs=[caption_state, focus_state],
|
| 296 |
outputs=[prompt]
|
| 297 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 298 |
demo.load(hello, inputs=None, outputs=msg1)
|
| 299 |
demo.queue().launch(show_error=True)
|
|
|
|
| 13 |
from PIL import Image
|
| 14 |
from threading import Thread
|
| 15 |
from typing import Generator
|
| 16 |
+
from huggingface_hub import CommitScheduler, HfApi
|
| 17 |
from debug import log_params, scheduler, save_image
|
|
|
|
|
|
|
| 18 |
from huggingface_hub.utils._runtime import dump_environment_info
|
| 19 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 20 |
|
| 21 |
+
logging.basicConfig(level=logging.DEBUG)
|
| 22 |
+
logger = logging.getLogger(__name__)
|
| 23 |
|
| 24 |
# Ensure device is set
|
| 25 |
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
|
|
|
|
| 290 |
inputs=[caption_state, focus_state],
|
| 291 |
outputs=[prompt]
|
| 292 |
)
|
| 293 |
+
|
| 294 |
+
def hello(profile: gr.OAuthProfile | None) -> str:
|
| 295 |
+
if profile is None:
|
| 296 |
+
return "Hello guest! There is a bug with HF ZeroGPUs that are affecting some usage on certain spaces. Testing out some possible solutions."
|
| 297 |
+
return f"You are logged in as {profile.name}. If you run into incorrect messages about ZeroGPU runtime credits being out, PLEASE give me a heads up so I can investigate further."
|
| 298 |
+
|
| 299 |
+
|
| 300 |
demo.load(hello, inputs=None, outputs=msg1)
|
| 301 |
demo.queue().launch(show_error=True)
|