THIS_REPO = "Civarchivist/test_progress"
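# The Hub id of this Space itself; restart_space() below uses it to restart the app on a schedule.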
import gradio as gr
import numpy as np
import random
# # import spaces #[uncomment to use ZeroGPU]
# from diffusers import DiffusionPipeline
# import torch

# device = "cuda" if torch.cuda.is_available() else "cpu"
# model_repo_id = "stabilityai/sdxl-turbo"  # Replace with the model you would like to use

# if torch.cuda.is_available():
#     torch_dtype = torch.float16
# else:
#     torch_dtype = torch.float32

# pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
# pipe = pipe.to(device)

# MAX_SEED = np.iinfo(np.int32).max
# MAX_IMAGE_SIZE = 1024


# # @spaces.GPU #[uncomment to use ZeroGPU]
# def infer(
#     prompt,
#     negative_prompt,
#     seed,
#     randomize_seed,
#     width,
#     height,
#     guidance_scale,
#     num_inference_steps,
#     progress=gr.Progress(track_tqdm=True),
# ):
#     if randomize_seed:
#         seed = random.randint(0, MAX_SEED)

#     generator = torch.Generator().manual_seed(seed)

#     image = pipe(
#         prompt=prompt,
#         negative_prompt=negative_prompt,
#         guidance_scale=guidance_scale,
#         num_inference_steps=num_inference_steps,
#         width=width,
#         height=height,
#         generator=generator,
#     ).images[0]

#     return image, seed


# examples = [
#     "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
#     "An astronaut riding a green horse",
#     "A delicious ceviche cheesecake slice",
# ]

# css = """
# #col-container {
#     margin: 0 auto;
#     max-width: 640px;
# }
# """

# with gr.Blocks(css=css) as demo:
#     with gr.Column(elem_id="col-container"):
#         gr.Markdown(" # Text-to-Image Gradio Template")

#         with gr.Row():
#             prompt = gr.Text(
#                 label="Prompt",
#                 show_label=False,
#                 max_lines=1,
#                 placeholder="Enter your prompt",
#                 container=False,
#             )

#             run_button = gr.Button("Run", scale=0, variant="primary")

#         result = gr.Image(label="Result", show_label=False)

#         with gr.Accordion("Advanced Settings", open=False):
#             negative_prompt = gr.Text(
#                 label="Negative prompt",
#                 max_lines=1,
#                 placeholder="Enter a negative prompt",
#                 visible=False,
#             )

#             seed = gr.Slider(
#                 label="Seed",
#                 minimum=0,
#                 maximum=MAX_SEED,
#                 step=1,
#                 value=0,
#             )

#             randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

#             with gr.Row():
#                 width = gr.Slider(
#                     label="Width",
#                     minimum=256,
#                     maximum=MAX_IMAGE_SIZE,
#                     step=32,
#                     value=1024,  # Replace with defaults that work for your model
#                 )

#                 height = gr.Slider(
#                     label="Height",
#                     minimum=256,
#                     maximum=MAX_IMAGE_SIZE,
#                     step=32,
#                     value=1024,  # Replace with defaults that work for your model
#                 )

#             with gr.Row():
#                 guidance_scale = gr.Slider(
#                     label="Guidance scale",
#                     minimum=0.0,
#                     maximum=10.0,
#                     step=0.1,
#                     value=0.0,  # Replace with defaults that work for your model
#                 )

#                 num_inference_steps = gr.Slider(
#                     label="Number of inference steps",
#                     minimum=1,
#                     maximum=50,
#                     step=1,
#                     value=2,  # Replace with defaults that work for your model
#                 )

#         gr.Examples(examples=examples, inputs=[prompt])
#     gr.on(
#         triggers=[run_button.click, prompt.submit],
#         fn=infer,
#         inputs=[
#             prompt,
#             negative_prompt,
#             seed,
#             randomize_seed,
#             width,
#             height,
#             guidance_scale,
#             num_inference_steps,
#         ],
#         outputs=[result, seed],
#     )
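
# ---------------------------------------------------------------------------
# The live app starts here: a small progress-bar test plus a scheduled
# self-restart. Everything above is the stock text-to-image template, left
# commented out.
# ---------------------------------------------------------------------------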
import os
import re
import json
import time
import uuid
import subprocess
from pathlib import Path
from typing import Optional

import requests
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import (
    HfApi,
    Repository,
    create_repo,
    file_exists,
    hf_hub_download,
    list_models,
    update_repo_visibility,
    upload_file,
    upload_folder,
    whoami,
)

api = HfApi()
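# Shared Hub API client; calls that need authentication receive the token explicitly.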
def slowly_reverse(word, progress=gr.Progress()):
    """Reverse `word` one letter at a time, reporting progress as it goes."""
    progress(0, desc="Starting\nPrepare yourself to be obliterated\nFilthy human")
    time.sleep(1)
    progress(0.05)
    new_string = ""
    eh = "start"
    for letter in progress.tqdm(word, desc="Reversing " + eh):
        time.sleep(0.25)
        new_string = letter + new_string
        eh = new_string
    progress(0, desc="You fool! This isn't even my final form!")
    time.sleep(5)
    progress(0.5, desc="You fool! This isn't even my final form!")
    return new_string
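# Note: progress.tqdm() evaluates its desc argument once, before iteration
# begins, so reassigning `eh` inside the loop never changes the on-screen
# label; only the per-letter ticks advance the bar. By contrast, the template
# above uses gr.Progress(track_tqdm=True), which mirrors tqdm loops inside the
# function into the UI automatically.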
# demo = gr.Interface(slowly_reverse, gr.Text(), gr.Text())
css = '''
#login {
    width: 100% !important;
    margin: 0 auto;
}
#disabled_upload {
    opacity: 0.5;
    pointer-events: none;
}
.error-log {
    max-height: 300px;
    overflow-y: auto;
    background-color: #f8d7da;
    padding: 10px;
    border-radius: 5px;
    margin-top: 10px;
}
'''
error_log = []

def log_error(message):
    error_log.append(f"{time.strftime('%Y-%m-%d %H:%M:%S')} - {message}")
    return "\n".join(error_log[-10:])  # Show the last 10 errors
def restart_space():
    try:
        api.restart_space(repo_id=THIS_REPO, token=os.environ["HF_TOKEN"])
    except Exception as e:
        return log_error(f"Error restarting space: {str(e)}")
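# Restarting a Space requires a token with write access; HF_TOKEN is expected
# to be set as a secret in this Space's settings.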
with gr.Blocks(css=css) as demo:
    with gr.Column():
        input_text = gr.Text("Whatever", interactive=True)
        output_text = gr.Text("Output")
        submit_btn = gr.Button("Upload to Hugging Face", interactive=True)
        upload_progress = gr.Progress(0)
        output = gr.Markdown(label="Upload Progress")

    def run_test(word, progress=gr.Progress()):
        vout = slowly_reverse(word, progress)
        return None, vout

    submit_btn.click(
        fn=run_test,
        # inputs=[input_text, upload_progress],  # Does not work: Progress has no component id and therefore cannot be used as an input.
        inputs=[input_text],
        outputs=[output, output_text],
    )
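# gr.Progress works by injection: because run_test declares
# progress=gr.Progress() as a default argument, Gradio substitutes a live
# tracker for the current request at call time, which is why it never appears
# in the inputs list.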
if __name__ == "__main__":
    scheduler = BackgroundScheduler()
    scheduler.add_job(restart_space, 'interval', seconds=3600)
    scheduler.start()

    demo.queue(default_concurrency_limit=5)
    demo.launch()