DaniloEducationServer committed on
Commit
d5ab69c
·
verified ·
1 Parent(s): dec0f84

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +136 -69
app.py CHANGED
@@ -1,69 +1,136 @@
1
FROM nvidia/cuda:11.8.0-cudnn8-runtime-ubuntu22.04

ENV DEBIAN_FRONTEND=noninteractive \
    TZ=America/Los_Angeles

ARG USE_PERSISTENT_DATA

# System packages: pyenv build prerequisites, git-lfs, and the shared
# libraries OpenCV/ffmpeg-based image code needs at runtime.
RUN apt-get update && apt-get install -y \
    git \
    make build-essential libssl-dev zlib1g-dev \
    libbz2-dev libreadline-dev libsqlite3-dev wget curl llvm \
    libncursesw5-dev xz-utils tk-dev libxml2-dev libxmlsec1-dev libffi-dev liblzma-dev git-lfs \
    ffmpeg libsm6 libxext6 cmake libgl1-mesa-glx \
    && rm -rf /var/lib/apt/lists/* \
    && git lfs install

WORKDIR /code

COPY ./requirements.txt /code/requirements.txt

# Create and switch to a non-root user (uid 1000, as expected by HF Spaces).
RUN useradd -m -u 1000 user
USER user
ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

# Install pyenv so the Python version can be pinned independently of the base image.
RUN curl https://pyenv.run | bash
ENV PATH=$HOME/.pyenv/shims:$HOME/.pyenv/bin:$PATH

ARG PYTHON_VERSION=3.10.12
# Build the pinned Python and install base tooling plus version-pinned deps.
RUN pyenv install $PYTHON_VERSION && \
    pyenv global $PYTHON_VERSION && \
    pyenv rehash && \
    pip install --no-cache-dir --upgrade pip setuptools wheel && \
    pip install --no-cache-dir \
    datasets \
    huggingface-hub "protobuf<4" "click<8.1"

RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt

WORKDIR $HOME/app

# Clone Fooocus, pin it to a known-good commit, and install its CUDA 11.8
# torch stack plus the project's version-pinned requirements.
RUN git clone https://github.com/lllyasviel/Fooocus . && \
    git checkout bd4d40203c7e727fd06fc58ec122c25befcc5644 && \
    pip install --no-cache-dir xformers==0.0.20 triton==2.0.0 torch==2.0.1 torchvision==0.15.2 --extra-index-url https://download.pytorch.org/whl/cu118 && \
    pip install --no-cache-dir -r requirements_versions.txt

# Patch the UI for shared hosting: cap image count at 3 and disable the
# Gradio API. Remove this RUN step when running on your own GPU.
RUN sed -i "s|image_number = gr.Slider(label='Image Number', minimum=1, maximum=32|image_number = gr.Slider(label='Image Number', minimum=1, maximum=3|" webui.py && \
    sed -i "s|shared.gradio_root = gr.Blocks(title='Fooocus ' + fooocus_version.version, css=modules.html.css).queue()|shared.gradio_root = gr.Blocks(title='Fooocus ' + fooocus_version.version, css=modules.html.css).queue(concurrency_count=1,api_open=False)|" webui.py && \
    sed -i "s|shared.gradio_root.launch(inbrowser=True, server_name=args.listen, server_port=args.port, share=args.share)|shared.gradio_root.launch(inbrowser=True, server_name=args.listen, server_port=args.port, share=args.share, show_api=False)|" webui.py

ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH \
    PYTHONPATH=$HOME/app \
    PYTHONUNBUFFERED=1 \
    GRADIO_ALLOW_FLAGGING=never \
    GRADIO_NUM_PORTS=1 \
    GRADIO_SERVER_NAME=0.0.0.0 \
    GRADIO_THEME=huggingface \
    SYSTEM=spaces

CMD ["python", "entry_with_update.py", "--listen"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import ssl
import sys

# Log the exact launch invocation up front to simplify debugging.
print('[System ARGV] ' + str(sys.argv))

# Anchor imports and all relative paths to the directory holding this script.
root = os.path.dirname(os.path.abspath(__file__))
sys.path.append(root)
os.chdir(root)

# Apple-silicon friendliness: fall back to CPU for ops MPS lacks, and lift
# the MPS high-watermark memory cap.
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
os.environ["PYTORCH_MPS_HIGH_WATERMARK_RATIO"] = "0.0"
# Default Gradio port unless the caller already chose one.
os.environ.setdefault("GRADIO_SERVER_PORT", "7865")

# NOTE(review): this disables TLS certificate verification process-wide,
# presumably so model downloads succeed behind broken certificate chains —
# a deliberate security trade-off worth confirming.
ssl._create_default_https_context = ssl._create_unverified_context
17
+
18
+ import platform
19
+ import fooocus_version
20
+
21
+ from build_launcher import build_launcher
22
+ from modules.launch_util import is_installed, run, python, run_pip, requirements_met, delete_folder_content
23
+ from modules.model_loader import load_file_from_url
24
+
25
# Force full reinstall of torch/requirements when flipped to True.
REINSTALL_ALL = False
# Opt-in attempt to install xformers (off by default).
TRY_INSTALL_XFORMERS = False


def prepare_environment():
    """Install torch/torchvision, optionally xformers, and pinned requirements.

    Behavior can be overridden via the TORCH_INDEX_URL, TORCH_COMMAND,
    REQS_FILE, and XFORMERS_PACKAGE environment variables.
    """
    index_url = os.environ.get('TORCH_INDEX_URL', "https://download.pytorch.org/whl/cu121")
    torch_command = os.environ.get(
        'TORCH_COMMAND',
        f"pip install torch==2.1.0 torchvision==0.16.0 --extra-index-url {index_url}")
    requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")

    print(f"Python {sys.version}")
    print(f"Fooocus version: {fooocus_version.version}")

    if REINSTALL_ALL or not is_installed("torch") or not is_installed("torchvision"):
        run(f'"{python}" -m {torch_command}', "Installing torch and torchvision",
            "Couldn't install torch", live=True)

    if TRY_INSTALL_XFORMERS and (REINSTALL_ALL or not is_installed("xformers")):
        xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.23')
        system_name = platform.system()
        if system_name == "Windows":
            # xformers wheels for Windows only cover Python 3.10.
            if platform.python_version().startswith("3.10"):
                run_pip(f"install -U -I --no-deps {xformers_package}", "xformers", live=True)
            else:
                print("Installation of xformers is not supported in this version of Python.")
                print(
                    "You can also check this and build manually: https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Xformers#building-xformers-on-windows-by-duckness")
                if not is_installed("xformers"):
                    exit(0)
        elif system_name == "Linux":
            run_pip(f"install -U -I --no-deps {xformers_package}", "xformers")

    if REINSTALL_ALL or not requirements_met(requirements_file):
        run_pip(f'install -r "{requirements_file}"', "requirements")
60
+
61
+
62
# (file name on disk, download URL) pairs for the VAE approximation helpers.
# NOTE(review): the sd15 entry stores the remote ".pt" file under a ".pth"
# name — presumably intentional; confirm against the loader.
vae_approx_filenames = [
    (
        'xlvaeapp.pth',
        'https://huggingface.co/lllyasviel/misc/resolve/main/xlvaeapp.pth',
    ),
    (
        'vaeapp_sd15.pth',
        'https://huggingface.co/lllyasviel/misc/resolve/main/vaeapp_sd15.pt',
    ),
    (
        'xl-to-v1_interposer-v3.1.safetensors',
        'https://huggingface.co/lllyasviel/misc/resolve/main/xl-to-v1_interposer-v3.1.safetensors',
    ),
]
68
+
69
+
70
def ini_args():
    """Parse and return the command-line arguments via args_manager.

    The import is deferred so argument parsing only happens after the
    environment has been prepared.
    """
    from args_manager import args as parsed_args
    return parsed_args
73
+
74
+
75
# Install dependencies, run the build-launcher hook, then parse CLI args.
prepare_environment()
build_launcher()
args = ini_args()

# Restrict CUDA to a single device when one is requested on the command line.
if args.gpu_device_id is not None:
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_device_id)
    print("Set device to:", args.gpu_device_id)

# Imported only after prepare_environment(), since modules.config may depend
# on freshly installed packages.
from modules import config

# Point Gradio's temp storage at the configured path.
os.environ['GRADIO_TEMP_DIR'] = config.temp_path

# Best-effort cleanup of the temp dir; a failure is reported but not fatal.
if config.temp_path_cleanup_on_launch:
    print(f'[Cleanup] Attempting to delete content of temp dir {config.temp_path}')
    if delete_folder_content(config.temp_path, '[Cleanup] '):
        print("[Cleanup] Cleanup successful")
    else:
        # Fix: dropped the pointless f-prefix (no placeholders, flake8 F541).
        print("[Cleanup] Failed to delete content of temp dir.")
94
+
95
+
96
def download_models(default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads):
    """Fetch auxiliary models and, unless disabled, the preset checkpoints.

    Returns the (possibly substituted) default model name together with the
    checkpoint download map actually used.
    """
    # VAE approximation nets are fetched unconditionally.
    for file_name, url in vae_approx_filenames:
        load_file_from_url(url=url, model_dir=config.path_vae_approx, file_name=file_name)

    # Prompt-expansion weights are fetched unconditionally as well.
    load_file_from_url(
        url='https://huggingface.co/lllyasviel/misc/resolve/main/fooocus_expansion.bin',
        model_dir=config.path_fooocus_expansion,
        file_name='pytorch_model.bin')

    if args.disable_preset_download:
        print('Skipped model download.')
        return default_model, checkpoint_downloads

    # When the preferred checkpoint is missing but an older default exists on
    # disk, reuse it instead of downloading a new model.
    if not args.always_download_new_model and \
            not os.path.exists(os.path.join(config.paths_checkpoints[0], default_model)):
        for candidate in previous_default_models:
            if os.path.exists(os.path.join(config.paths_checkpoints[0], candidate)):
                print(f'You do not have [{default_model}] but you have [{candidate}].')
                print(f'Fooocus will use [{candidate}] to avoid downloading new models, '
                      f'but you are not using the latest models.')
                print('Use --always-download-new-model to avoid fallback and always get new models.')
                checkpoint_downloads = {}
                default_model = candidate
                break

    for file_name, url in checkpoint_downloads.items():
        load_file_from_url(url=url, model_dir=config.paths_checkpoints[0], file_name=file_name)
    for file_name, url in embeddings_downloads.items():
        load_file_from_url(url=url, model_dir=config.path_embeddings, file_name=file_name)
    for file_name, url in lora_downloads.items():
        load_file_from_url(url=url, model_dir=config.paths_loras[0], file_name=file_name)

    return default_model, checkpoint_downloads
130
+
131
+
132
# Resolve the model set (possibly falling back to an already-downloaded
# default) and perform any required downloads before launching the UI.
config.default_base_model_name, config.checkpoint_downloads = download_models(
    config.default_base_model_name,
    config.previous_default_models,
    config.checkpoint_downloads,
    config.embeddings_downloads,
    config.lora_downloads,
)

# Importing webui starts the Gradio app as a side effect of the import.
from webui import *