sammyview80 commited on
Commit
9a83f7c
·
verified ·
1 Parent(s): d741eea

Upload 3 files

Browse files
Files changed (3) hide show
  1. README.md +8 -5
  2. app.py +158 -0
  3. requirements.txt +100 -0
README.md CHANGED
@@ -1,10 +1,13 @@
1
  ---
2
- title: Playground
3
- emoji: 🏃
4
- colorFrom: green
5
- colorTo: red
6
- sdk: docker
 
 
7
  pinned: false
 
8
  ---
9
 
10
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: Playground V2.5
3
+ emoji: 🌍
4
+ colorFrom: pink
5
+ colorTo: green
6
+ sdk: gradio
7
+ sdk_version: 4.8.0
8
+ app_file: app.py
9
  pinned: false
10
+ license: mit
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,158 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python

import os
import random
import uuid

import numpy as np
from PIL import Image
import spaces
import torch
from diffusers import DiffusionPipeline


from flask import Flask, flash, request
from flask_session import Session


# Flask application with server-side (filesystem-backed) sessions.
app = Flask(__name__)
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"

Session(app)


DESCRIPTION = """# Playground v2.5"""
if not torch.cuda.is_available():
    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo may not work on CPU.</p>"

# Largest seed a caller may request (fits a signed 32-bit int).
MAX_SEED = np.iinfo(np.int32).max
# Feature toggles, all overridable via environment variables.
CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "1") == "1"
MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1536"))
USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

NUM_IMAGES_PER_PROMPT = 1

# One-time pipeline load at import time, GPU only.
# NOTE(review): when CUDA is unavailable, `pipe` is never bound at module
# level, so any later use of the module-level `pipe` on a CPU-only host
# raises NameError — confirm whether CPU operation is actually supported.
if torch.cuda.is_available():
    pipe = DiffusionPipeline.from_pretrained(
        "playgroundai/playground-v2.5-1024px-aesthetic",
        torch_dtype=torch.float16,
        use_safetensors=True,
        add_watermarker=False,
        variant="fp16"
    )
    if ENABLE_CPU_OFFLOAD:
        # Offload keeps weights on CPU and streams layers to GPU on demand.
        pipe.enable_model_cpu_offload()
    else:
        pipe.to(device)
        print("Loaded on Device!")

    if USE_TORCH_COMPILE:
        # Optional torch.compile of the UNet for faster repeated inference.
        pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
        print("Model Compiled!")
57
+
58
def save_image(img):
    """Write *img* to the working directory under a random PNG name.

    Returns the generated file name so callers can hand it back to clients.
    """
    filename = f"{uuid.uuid4()}.png"
    img.save(filename)
    return filename
62
+
63
+
64
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    """Return a fresh random seed in [0, MAX_SEED] when *randomize_seed*
    is set; otherwise pass *seed* through unchanged."""
    return random.randint(0, MAX_SEED) if randomize_seed else seed
68
+
69
+
70
# @spaces.GPU(enable_queue=True)
def generate(
    prompt: str,
    negative_prompt: str = "",
    use_negative_prompt: bool = False,
    seed: int = 0,
    width: int = 1024,
    height: int = 1024,
    guidance_scale: float = 3,
    randomize_seed: bool = False,
    use_resolution_binning: bool = True,
):
    """Run the text-to-image pipeline and save the results to disk.

    Args:
        prompt: text prompt for the diffusion model.
        negative_prompt: forwarded only when *use_negative_prompt* is True.
        use_negative_prompt: gate for *negative_prompt*.
        seed: RNG seed; replaced by a random one when *randomize_seed* is True.
        width / height: output resolution in pixels.
        guidance_scale: classifier-free guidance strength.
        randomize_seed: draw a fresh seed instead of using *seed*.
        use_resolution_binning: forwarded to the pipeline.

    Returns:
        A tuple ``(image_paths, seed)`` — the saved PNG file names and the
        seed actually used.
    """
    # Fix: the original re-ran DiffusionPipeline.from_pretrained(...) (plus
    # cpu-offload / torch.compile setup and an extra pipe.to(device)) on
    # EVERY call, duplicating the module-level load and making each request
    # pay the full model-instantiation cost. Use the pipeline loaded once at
    # import time instead; its placement (device or cpu-offload) is already
    # configured there.
    seed = int(randomize_seed_fn(seed, randomize_seed))
    generator = torch.Generator().manual_seed(seed)

    # Only forward a negative prompt when the caller explicitly enabled it.
    if not use_negative_prompt:
        negative_prompt = None  # type: ignore

    images = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        width=width,
        height=height,
        guidance_scale=guidance_scale,
        num_inference_steps=25,
        generator=generator,
        num_images_per_prompt=NUM_IMAGES_PER_PROMPT,
        use_resolution_binning=use_resolution_binning,
        output_type="pil",
    ).images

    image_paths = [save_image(img) for img in images]
    print(image_paths)
    return image_paths, seed
124
+
125
+
126
# Sample prompts shown to users of the demo.
examples = [
    "neon holography crystal cat",
    "a cat eating a piece of cheese",
    "an astronaut riding a horse in space",
    "a cartoon of a boy playing with a tiger",
    "a cute robot artist painting on an easel, concept art",
    "a close up of a woman wearing a transparent, prismatic, elaborate nemeses headdress, over the should pose, brown skin-tone"
]
134
+
135
+
136
+
137
@app.route("/", methods=['GET', 'POST'])
def hello():
    """Single endpoint: POST two image files to trigger generation,
    GET returns a short usage message.

    Returns a JSON-serializable dict in every branch.
    """
    if request.method == 'POST':
        # Fix: the original condition was
        #     if 'file' and 'file1' not in request.files:
        # which Python evaluates as `'file1' not in request.files` — the
        # literal 'file' is truthy and never checked, so a request missing
        # only 'file' slipped past the guard and crashed on the lookup below.
        if 'file' not in request.files or 'file1' not in request.files:
            flash('No file part')
            return {"status": "Failed", "message": "Please Provide file name(file)."}
        file = request.files['file']
        file1 = request.files['file1']
        # NOTE(review): the uploaded images are decoded but never passed to
        # generate() below, which runs on a hard-coded prompt — confirm
        # whether the uploads should feed the pipeline.
        image = Image.open(file)
        image1 = Image.open(file1)
        preprocess_image = generate('a boy playing with basketball')
        # print(preprocess_image)
        return {"status": "Success", "message": "You can download the 3D model.", "data": preprocess_image}

    else:
        return {
            "status": "Success",
            "message": "You can upload an image file to get the 3D model."
        }

if "__main__" == __name__:
    app.run(debug=True)
requirements.txt ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ accelerate==0.28.0
2
+ aiofiles==23.2.1
3
+ altair==5.2.0
4
+ annotated-types==0.6.0
5
+ anyio==4.3.0
6
+ attrs==23.2.0
7
+ blinker==1.7.0
8
+ cachelib==0.12.0
9
+ certifi==2024.2.2
10
+ charset-normalizer==3.3.2
11
+ click==8.1.7
12
+ colorama==0.4.6
13
+ contourpy==1.2.0
14
+ cycler==0.12.1
15
+ diffusers @ git+https://github.com/huggingface/diffusers.git@363699044e365ef977a7646b500402fa585e1b6b
16
+ exceptiongroup==1.2.0
17
+ fastapi==0.110.0
18
+ ffmpy==0.3.2
19
+ filelock==3.13.1
20
+ Flask==3.0.2
21
+ Flask-Session==0.7.0
22
+ fonttools==4.50.0
23
+ fsspec==2024.3.1
24
+ gradio==4.22.0
25
+ gradio_client==0.13.0
26
+ h11==0.14.0
27
+ httpcore==1.0.4
28
+ httpx==0.27.0
29
+ huggingface-hub==0.21.4
30
+ idna==3.6
31
+ importlib_metadata==7.1.0
32
+ importlib_resources==6.4.0
33
+ itsdangerous==2.1.2
34
+ Jinja2==3.1.3
35
+ jsonschema==4.21.1
36
+ jsonschema-specifications==2023.12.1
37
+ kiwisolver==1.4.5
38
+ markdown-it-py==3.0.0
39
+ MarkupSafe==2.1.5
40
+ matplotlib==3.8.3
41
+ mdurl==0.1.2
42
+ mpmath==1.3.0
43
+ msgspec==0.18.6
44
+ networkx==3.2.1
45
+ numpy==1.26.4
46
+ nvidia-cublas-cu12==12.1.3.1
47
+ nvidia-cuda-cupti-cu12==12.1.105
48
+ nvidia-cuda-nvrtc-cu12==12.1.105
49
+ nvidia-cuda-runtime-cu12==12.1.105
50
+ nvidia-cudnn-cu12==8.9.2.26
51
+ nvidia-cufft-cu12==11.0.2.54
52
+ nvidia-curand-cu12==10.3.2.106
53
+ nvidia-cusolver-cu12==11.4.5.107
54
+ nvidia-cusparse-cu12==12.1.0.106
55
+ nvidia-nccl-cu12==2.19.3
56
+ nvidia-nvjitlink-cu12==12.4.99
57
+ nvidia-nvtx-cu12==12.1.105
58
+ orjson==3.9.15
59
+ packaging==24.0
60
+ pandas==2.2.1
61
+ pillow==10.2.0
62
+ psutil==5.9.8
63
+ pydantic==2.6.4
64
+ pydantic_core==2.16.3
65
+ pydub==0.25.1
66
+ Pygments==2.17.2
67
+ pyparsing==3.1.2
68
+ python-dateutil==2.9.0.post0
69
+ python-multipart==0.0.9
70
+ pytz==2024.1
71
+ PyYAML==6.0.1
72
+ referencing==0.34.0
73
+ regex==2023.12.25
74
+ requests==2.31.0
75
+ rich==13.7.1
76
+ rpds-py==0.18.0
77
+ ruff==0.3.4
78
+ safetensors==0.4.2
79
+ semantic-version==2.10.0
80
+ shellingham==1.5.4
81
+ six==1.16.0
82
+ sniffio==1.3.1
83
+ spaces==0.24.2
84
+ starlette==0.36.3
85
+ sympy==1.12
86
+ tokenizers==0.15.2
87
+ tomlkit==0.12.0
88
+ toolz==0.12.1
89
+ torch==2.2.1
90
+ tqdm==4.66.2
91
+ transformers==4.39.1
92
+ triton==2.2.0
93
+ typer==0.10.0
94
+ typing_extensions==4.10.0
95
+ tzdata==2024.1
96
+ urllib3==2.2.1
97
+ uvicorn==0.29.0
98
+ websockets==11.0.3
99
+ Werkzeug==3.0.1
100
+ zipp==3.18.1