Spaces:
Running
on
Zero
Running
on
Zero
Update app_v5.py
Browse files
app_v5.py
CHANGED
|
@@ -1,7 +1,7 @@
|
|
| 1 |
# app_v5.py
|
| 2 |
import gradio as gr
|
| 3 |
import spaces
|
| 4 |
-
|
| 5 |
import torch
|
| 6 |
from gradio_client import Client, handle_file
|
| 7 |
import os
|
|
@@ -16,18 +16,13 @@ from diffusers.utils import load_image
|
|
| 16 |
from PIL import Image
|
| 17 |
from threading import Thread
|
| 18 |
from typing import Generator
|
| 19 |
-
from huggingface_hub import CommitScheduler, HfApi
|
| 20 |
from debug import log_params, scheduler, save_image
|
| 21 |
-
logging.set_verbosity_debug()
|
| 22 |
-
from model_loader import safe_model_load
|
| 23 |
from huggingface_hub.utils._runtime import dump_environment_info
|
| 24 |
|
| 25 |
-
def hello(profile: gr.OAuthProfile | None) -> str:
|
| 26 |
-
if profile is None:
|
| 27 |
-
return "Hello guest! There is a bug with HF ZeroGPUs that are affecting some usage on certain spaces. Testing out some possible solutions."
|
| 28 |
-
return f"You are logged in as {profile.name}. If you run into incorrect messages about ZeroGPU runtime credits being out, PLEASE give me a heads up so I can investigate further."
|
| 29 |
-
|
| 30 |
|
|
|
|
|
|
|
| 31 |
|
| 32 |
# Ensure device is set
|
| 33 |
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
|
|
@@ -272,6 +267,11 @@ with gr.Blocks(title="FLUX Turbo Upscaler", fill_height=True) as demo:
|
|
| 272 |
# y_offset=0,
|
| 273 |
# api_name="/infer"
|
| 274 |
# )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 275 |
caption_state = gr.State()
|
| 276 |
focus_state = gr.State()
|
| 277 |
log_state = gr.State()
|
|
|
|
| 1 |
# app_v5.py
|
| 2 |
import gradio as gr
|
| 3 |
import spaces
|
| 4 |
+
import logging
|
| 5 |
import torch
|
| 6 |
from gradio_client import Client, handle_file
|
| 7 |
import os
|
|
|
|
| 16 |
from PIL import Image
|
| 17 |
from threading import Thread
|
| 18 |
from typing import Generator
|
| 19 |
+
from huggingface_hub import CommitScheduler, HfApi
|
| 20 |
from debug import log_params, scheduler, save_image
|
|
|
|
|
|
|
| 21 |
from huggingface_hub.utils._runtime import dump_environment_info
|
| 22 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 23 |
|
| 24 |
+
logging.basicConfig(level=logging.INFO)
|
| 25 |
+
logger = logging.getLogger(__name__)
|
| 26 |
|
| 27 |
# Ensure device is set
|
| 28 |
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
|
|
|
|
| 267 |
# y_offset=0,
|
| 268 |
# api_name="/infer"
|
| 269 |
# )
|
| 270 |
+
def hello(profile: gr.OAuthProfile | None) -> str:
|
| 271 |
+
if profile is None:
|
| 272 |
+
return "Hello guest! There is a bug with HF ZeroGPUs that are affecting some usage on certain spaces. Testing out some possible solutions."
|
| 273 |
+
return f"You are logged in as {profile.name}. If you run into incorrect messages about ZeroGPU runtime credits being out, PLEASE give me a heads up so I can investigate further."
|
| 274 |
+
|
| 275 |
caption_state = gr.State()
|
| 276 |
focus_state = gr.State()
|
| 277 |
log_state = gr.State()
|