NagisaNao commited on
Commit
c8f1ef6
·
verified ·
1 Parent(s): 8fb4aca
files_cells/notebooks/en/downloading_en.ipynb CHANGED
@@ -344,6 +344,7 @@
344
  " \"adetailer\": adetailer_dir\n",
345
  "}\n",
346
  "\n",
 
347
  "# !mkdir -p {models_dir} {vaes_dir} {loras_dir} {embeddings_dir} {extensions_dir} {control_dir} {adetailer_dir}\n",
348
  "directories = [value for key, value in prefixes.items()] # for unpucking zip files\n",
349
  "!mkdir -p {\" \".join(directories)}\n",
@@ -375,15 +376,6 @@
375
  " image_url = images[0].get('url', None) # get preview: first image\n",
376
  " return model_name, image_url\n",
377
  "\n",
378
- "def modify_image_url(image_url):\n",
379
- " parts = image_url.split('/')\n",
380
- " for i, part in enumerate(parts):\n",
381
- " if part.startswith('width='):\n",
382
- " width_value = int(part.split('=')[1])\n",
383
- " parts[i] = f'width={width_value * 2}' # resize for quality image\n",
384
- " break\n",
385
- " return '/'.join(parts)\n",
386
- "\n",
387
  "def generate_preview_filename(model_name, image_url):\n",
388
  " file_parts = model_name.split('.')\n",
389
  " image_format = image_url.split('.')[-1].split('?')[0]\n",
@@ -407,6 +399,7 @@
407
  " extension_repo.append((url, file_name))\n",
408
  "\n",
409
  "def manual_download(url, dst_dir, file_name):\n",
 
410
  " basename = url.split(\"/\")[-1] if file_name is None else file_name\n",
411
  " header_option = f\"--header={user_header}\"\n",
412
  "\n",
@@ -423,11 +416,10 @@
423
  " if data:\n",
424
  " model_name, image_url = extract_file_and_image_info(data)\n",
425
  " if model_name and image_url:\n",
426
- " new_image_url = modify_image_url(image_url)\n",
427
- " image_file_name = generate_preview_filename(model_name if not file_name else file_name, new_image_url)\n",
428
- " save_img_path = f\"{dst_dir}/{image_file_name}\"\n",
429
- " !wget -O {save_img_path} {new_image_url} # download image\n",
430
- " # print(f\"\\n\\n\\n{save_img_path, new_image_url}\\n\\n\\n\")\n",
431
  " else:\n",
432
  " print(\"File name or image URL missing.\")\n",
433
  " else:\n",
@@ -442,22 +434,24 @@
442
  " !gdown \"{url}\" -O {dst_dir}/{file_name} --fuzzy -c\n",
443
  " else:\n",
444
  " !gdown \"{url}\" -O {dst_dir} --fuzzy -c\n",
445
- " # -- Huggin Face --\n",
 
446
  " elif 'huggingface' in url:\n",
447
  " if '/blob/' in url:\n",
448
  " url = url.replace('/blob/', '/resolve/')\n",
449
  " if file_name:\n",
450
- " !aria2c {header_option} --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -j5 -x16 -s16 -k1M -c -d {dst_dir} -o {basename} {url}\n",
451
  " else:\n",
452
  " parsed_link = f'\\n{url}\\n\\tout={unquote(url.split(\"/\")[-1])}'\n",
453
- " !echo -e \"{parsed_link}\" | aria2c {header_option} --console-log-level=error --summary-interval=10 -i- -j5 -x16 -s16 -k1M -c -d \"{dst_dir}\" -o {basename}\n",
 
454
  " # -- Other --\n",
455
  " elif 'http' in url or 'magnet' in url:\n",
456
  " if file_name:\n",
457
- " !aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -j5 -x16 -s16 -k1M -c -d {dst_dir} -o {file_name} {url}\n",
458
  " else:\n",
459
  " parsed_link = '\"{}\"'.format(url)\n",
460
- " !aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -j5 -x16 -s16 -k1M -c -d {dst_dir} -Z {parsed_link}\n",
461
  "\n",
462
  "def download(url):\n",
463
  " links_and_paths = url.split(',')\n",
 
344
  " \"adetailer\": adetailer_dir\n",
345
  "}\n",
346
  "\n",
347
+ "aria2_args = '--optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -j5 -x16 -s16 -k1M -c'\n",
348
  "# !mkdir -p {models_dir} {vaes_dir} {loras_dir} {embeddings_dir} {extensions_dir} {control_dir} {adetailer_dir}\n",
349
  "directories = [value for key, value in prefixes.items()] # for unpucking zip files\n",
350
  "!mkdir -p {\" \".join(directories)}\n",
 
376
  " image_url = images[0].get('url', None) # get preview: first image\n",
377
  " return model_name, image_url\n",
378
  "\n",
 
 
 
 
 
 
 
 
 
379
  "def generate_preview_filename(model_name, image_url):\n",
380
  " file_parts = model_name.split('.')\n",
381
  " image_format = image_url.split('.')[-1].split('?')[0]\n",
 
399
  " extension_repo.append((url, file_name))\n",
400
  "\n",
401
  "def manual_download(url, dst_dir, file_name):\n",
402
+ " global aria2_args\n",
403
  " basename = url.split(\"/\")[-1] if file_name is None else file_name\n",
404
  " header_option = f\"--header={user_header}\"\n",
405
  "\n",
 
416
  " if data:\n",
417
  " model_name, image_url = extract_file_and_image_info(data)\n",
418
  " if model_name and image_url:\n",
419
+ " image_file_name = generate_preview_filename(model_name if not file_name else file_name, image_url)\n",
420
+ " with capture.capture_output() as cap: # clear shit\n",
421
+ " !aria2c {aria2_args} -d {dst_dir} -o {image_file_name} {image_url}\n",
422
+ " del cap\n",
 
423
  " else:\n",
424
  " print(\"File name or image URL missing.\")\n",
425
  " else:\n",
 
434
  " !gdown \"{url}\" -O {dst_dir}/{file_name} --fuzzy -c\n",
435
  " else:\n",
436
  " !gdown \"{url}\" -O {dst_dir} --fuzzy -c\n",
437
+ "\n",
438
+ " # -- Hugging Face --\n",
439
  " elif 'huggingface' in url:\n",
440
  " if '/blob/' in url:\n",
441
  " url = url.replace('/blob/', '/resolve/')\n",
442
  " if file_name:\n",
443
+ " !aria2c {header_option} {aria2_args} {dst_dir} -o {basename} {url}\n",
444
  " else:\n",
445
  " parsed_link = f'\\n{url}\\n\\tout={unquote(url.split(\"/\")[-1])}'\n",
446
+ " !echo -e \"{parsed_link}\" | aria2c {header_option} {aria2_args} -d \"{dst_dir}\" -o {basename}\n",
447
+ "\n",
448
  " # -- Other --\n",
449
  " elif 'http' in url or 'magnet' in url:\n",
450
  " if file_name:\n",
451
+ " !aria2c {aria2_args} -d {dst_dir} -o {file_name} {url}\n",
452
  " else:\n",
453
  " parsed_link = '\"{}\"'.format(url)\n",
454
+ " !aria2c {aria2_args} -d {dst_dir} -Z {parsed_link}\n",
455
  "\n",
456
  "def download(url):\n",
457
  " links_and_paths = url.split(',')\n",
files_cells/notebooks/ru/downloading_ru.ipynb CHANGED
@@ -344,6 +344,7 @@
344
  " \"adetailer\": adetailer_dir\n",
345
  "}\n",
346
  "\n",
 
347
  "# !mkdir -p {models_dir} {vaes_dir} {loras_dir} {embeddings_dir} {extensions_dir} {control_dir} {adetailer_dir}\n",
348
  "directories = [value for key, value in prefixes.items()] # for unpucking zip files\n",
349
  "!mkdir -p {\" \".join(directories)}\n",
@@ -375,15 +376,6 @@
375
  " image_url = images[0].get('url', None) # get preview: first image\n",
376
  " return model_name, image_url\n",
377
  "\n",
378
- "def modify_image_url(image_url):\n",
379
- " parts = image_url.split('/')\n",
380
- " for i, part in enumerate(parts):\n",
381
- " if part.startswith('width='):\n",
382
- " width_value = int(part.split('=')[1])\n",
383
- " parts[i] = f'width={width_value * 2}' # resize for quality image\n",
384
- " break\n",
385
- " return '/'.join(parts)\n",
386
- "\n",
387
  "def generate_preview_filename(model_name, image_url):\n",
388
  " file_parts = model_name.split('.')\n",
389
  " image_format = image_url.split('.')[-1].split('?')[0]\n",
@@ -407,6 +399,7 @@
407
  " extension_repo.append((url, file_name))\n",
408
  "\n",
409
  "def manual_download(url, dst_dir, file_name):\n",
 
410
  " basename = url.split(\"/\")[-1] if file_name is None else file_name\n",
411
  " header_option = f\"--header={user_header}\"\n",
412
  "\n",
@@ -423,11 +416,10 @@
423
  " if data:\n",
424
  " model_name, image_url = extract_file_and_image_info(data)\n",
425
  " if model_name and image_url:\n",
426
- " new_image_url = modify_image_url(image_url)\n",
427
- " image_file_name = generate_preview_filename(model_name if not file_name else file_name, new_image_url)\n",
428
- " save_img_path = f\"{dst_dir}/{image_file_name}\"\n",
429
- " !wget -O {save_img_path} {new_image_url} # download image\n",
430
- " # print(f\"\\n\\n\\n{save_img_path, new_image_url}\\n\\n\\n\")\n",
431
  " else:\n",
432
  " print(\"File name or image URL missing.\")\n",
433
  " else:\n",
@@ -442,22 +434,24 @@
442
  " !gdown \"{url}\" -O {dst_dir}/{file_name} --fuzzy -c\n",
443
  " else:\n",
444
  " !gdown \"{url}\" -O {dst_dir} --fuzzy -c\n",
 
445
  " # -- Hugging Face --\n",
446
  " elif 'huggingface' in url:\n",
447
  " if '/blob/' in url:\n",
448
  " url = url.replace('/blob/', '/resolve/')\n",
449
  " if file_name:\n",
450
- " !aria2c {header_option} --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -j5 -x16 -s16 -k1M -c -d {dst_dir} -o {basename} {url}\n",
451
  " else:\n",
452
  " parsed_link = f'\\n{url}\\n\\tout={unquote(url.split(\"/\")[-1])}'\n",
453
- " !echo -e \"{parsed_link}\" | aria2c {header_option} --console-log-level=error --summary-interval=10 -i- -j5 -x16 -s16 -k1M -c -d \"{dst_dir}\" -o {basename}\n",
 
454
  " # -- Other --\n",
455
  " elif 'http' in url or 'magnet' in url:\n",
456
  " if file_name:\n",
457
- " !aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -j5 -x16 -s16 -k1M -c -d {dst_dir} -o {file_name} {url}\n",
458
  " else:\n",
459
  " parsed_link = '\"{}\"'.format(url)\n",
460
- " !aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -j5 -x16 -s16 -k1M -c -d {dst_dir} -Z {parsed_link}\n",
461
  "\n",
462
  "def download(url):\n",
463
  " links_and_paths = url.split(',')\n",
 
344
  " \"adetailer\": adetailer_dir\n",
345
  "}\n",
346
  "\n",
347
+ "aria2_args = '--optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -j5 -x16 -s16 -k1M -c'\n",
348
  "# !mkdir -p {models_dir} {vaes_dir} {loras_dir} {embeddings_dir} {extensions_dir} {control_dir} {adetailer_dir}\n",
349
  "directories = [value for key, value in prefixes.items()] # for unpucking zip files\n",
350
  "!mkdir -p {\" \".join(directories)}\n",
 
376
  " image_url = images[0].get('url', None) # get preview: first image\n",
377
  " return model_name, image_url\n",
378
  "\n",
 
 
 
 
 
 
 
 
 
379
  "def generate_preview_filename(model_name, image_url):\n",
380
  " file_parts = model_name.split('.')\n",
381
  " image_format = image_url.split('.')[-1].split('?')[0]\n",
 
399
  " extension_repo.append((url, file_name))\n",
400
  "\n",
401
  "def manual_download(url, dst_dir, file_name):\n",
402
+ " global aria2_args\n",
403
  " basename = url.split(\"/\")[-1] if file_name is None else file_name\n",
404
  " header_option = f\"--header={user_header}\"\n",
405
  "\n",
 
416
  " if data:\n",
417
  " model_name, image_url = extract_file_and_image_info(data)\n",
418
  " if model_name and image_url:\n",
419
+ " image_file_name = generate_preview_filename(model_name if not file_name else file_name, image_url)\n",
420
+ " with capture.capture_output() as cap: # clear shit\n",
421
+ " !aria2c {aria2_args} -d {dst_dir} -o {image_file_name} {image_url}\n",
422
+ " del cap\n",
 
423
  " else:\n",
424
  " print(\"File name or image URL missing.\")\n",
425
  " else:\n",
 
434
  " !gdown \"{url}\" -O {dst_dir}/{file_name} --fuzzy -c\n",
435
  " else:\n",
436
  " !gdown \"{url}\" -O {dst_dir} --fuzzy -c\n",
437
+ "\n",
438
  " # -- Hugging Face --\n",
439
  " elif 'huggingface' in url:\n",
440
  " if '/blob/' in url:\n",
441
  " url = url.replace('/blob/', '/resolve/')\n",
442
  " if file_name:\n",
443
+ " !aria2c {header_option} {aria2_args} {dst_dir} -o {basename} {url}\n",
444
  " else:\n",
445
  " parsed_link = f'\\n{url}\\n\\tout={unquote(url.split(\"/\")[-1])}'\n",
446
+ " !echo -e \"{parsed_link}\" | aria2c {header_option} {aria2_args} -d \"{dst_dir}\" -o {basename}\n",
447
+ "\n",
448
  " # -- Other --\n",
449
  " elif 'http' in url or 'magnet' in url:\n",
450
  " if file_name:\n",
451
+ " !aria2c {aria2_args} -d {dst_dir} -o {file_name} {url}\n",
452
  " else:\n",
453
  " parsed_link = '\"{}\"'.format(url)\n",
454
+ " !aria2c {aria2_args} -d {dst_dir} -Z {parsed_link}\n",
455
  "\n",
456
  "def download(url):\n",
457
  " links_and_paths = url.split(',')\n",
files_cells/python/en/downloading_en.py CHANGED
@@ -325,6 +325,7 @@ prefixes = {
325
  "adetailer": adetailer_dir
326
  }
327
 
 
328
  # !mkdir -p {models_dir} {vaes_dir} {loras_dir} {embeddings_dir} {extensions_dir} {control_dir} {adetailer_dir}
329
  directories = [value for key, value in prefixes.items()] # for unpacking zip files
330
  get_ipython().system('mkdir -p {" ".join(directories)}')
@@ -356,15 +357,6 @@ def extract_file_and_image_info(data):
356
  image_url = images[0].get('url', None) # get preview: first image
357
  return model_name, image_url
358
 
359
- def modify_image_url(image_url):
360
- parts = image_url.split('/')
361
- for i, part in enumerate(parts):
362
- if part.startswith('width='):
363
- width_value = int(part.split('=')[1])
364
- parts[i] = f'width={width_value * 2}' # resize for quality image
365
- break
366
- return '/'.join(parts)
367
-
368
  def generate_preview_filename(model_name, image_url):
369
  file_parts = model_name.split('.')
370
  image_format = image_url.split('.')[-1].split('?')[0]
@@ -388,6 +380,7 @@ def handle_manual(url):
388
  extension_repo.append((url, file_name))
389
 
390
  def manual_download(url, dst_dir, file_name):
 
391
  basename = url.split("/")[-1] if file_name is None else file_name
392
  header_option = f"--header={user_header}"
393
 
@@ -404,11 +397,10 @@ def manual_download(url, dst_dir, file_name):
404
  if data:
405
  model_name, image_url = extract_file_and_image_info(data)
406
  if model_name and image_url:
407
- new_image_url = modify_image_url(image_url)
408
- image_file_name = generate_preview_filename(model_name if not file_name else file_name, new_image_url)
409
- save_img_path = f"{dst_dir}/{image_file_name}"
410
- get_ipython().system('wget -O {save_img_path} {new_image_url} # download image')
411
- # print(f"\n\n\n{save_img_path, new_image_url}\n\n\n")
412
  else:
413
  print("File name or image URL missing.")
414
  else:
@@ -423,22 +415,24 @@ def manual_download(url, dst_dir, file_name):
423
  get_ipython().system('gdown "{url}" -O {dst_dir}/{file_name} --fuzzy -c')
424
  else:
425
  get_ipython().system('gdown "{url}" -O {dst_dir} --fuzzy -c')
426
- # -- Huggin Face --
 
427
  elif 'huggingface' in url:
428
  if '/blob/' in url:
429
  url = url.replace('/blob/', '/resolve/')
430
  if file_name:
431
- get_ipython().system('aria2c {header_option} --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -j5 -x16 -s16 -k1M -c -d {dst_dir} -o {basename} {url}')
432
  else:
433
  parsed_link = f'\n{url}\n\tout={unquote(url.split("/")[-1])}'
434
- get_ipython().system('echo -e "{parsed_link}" | aria2c {header_option} --console-log-level=error --summary-interval=10 -i- -j5 -x16 -s16 -k1M -c -d "{dst_dir}" -o {basename}')
 
435
  # -- Other --
436
  elif 'http' in url or 'magnet' in url:
437
  if file_name:
438
- get_ipython().system('aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -j5 -x16 -s16 -k1M -c -d {dst_dir} -o {file_name} {url}')
439
  else:
440
  parsed_link = '"{}"'.format(url)
441
- get_ipython().system('aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -j5 -x16 -s16 -k1M -c -d {dst_dir} -Z {parsed_link}')
442
 
443
  def download(url):
444
  links_and_paths = url.split(',')
 
325
  "adetailer": adetailer_dir
326
  }
327
 
328
+ aria2_args = '--optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -j5 -x16 -s16 -k1M -c'
329
  # !mkdir -p {models_dir} {vaes_dir} {loras_dir} {embeddings_dir} {extensions_dir} {control_dir} {adetailer_dir}
330
  directories = [value for key, value in prefixes.items()] # for unpacking zip files
331
  get_ipython().system('mkdir -p {" ".join(directories)}')
 
357
  image_url = images[0].get('url', None) # get preview: first image
358
  return model_name, image_url
359
 
 
 
 
 
 
 
 
 
 
360
  def generate_preview_filename(model_name, image_url):
361
  file_parts = model_name.split('.')
362
  image_format = image_url.split('.')[-1].split('?')[0]
 
380
  extension_repo.append((url, file_name))
381
 
382
  def manual_download(url, dst_dir, file_name):
383
+ global aria2_args
384
  basename = url.split("/")[-1] if file_name is None else file_name
385
  header_option = f"--header={user_header}"
386
 
 
397
  if data:
398
  model_name, image_url = extract_file_and_image_info(data)
399
  if model_name and image_url:
400
+ image_file_name = generate_preview_filename(model_name if not file_name else file_name, image_url)
401
+ with capture.capture_output() as cap: # clear shit
402
+ get_ipython().system('aria2c {aria2_args} -d {dst_dir} -o {image_file_name} {image_url}')
403
+ del cap
 
404
  else:
405
  print("File name or image URL missing.")
406
  else:
 
415
  get_ipython().system('gdown "{url}" -O {dst_dir}/{file_name} --fuzzy -c')
416
  else:
417
  get_ipython().system('gdown "{url}" -O {dst_dir} --fuzzy -c')
418
+
419
+ # -- Hugging Face --
420
  elif 'huggingface' in url:
421
  if '/blob/' in url:
422
  url = url.replace('/blob/', '/resolve/')
423
  if file_name:
424
+ get_ipython().system('aria2c {header_option} {aria2_args} -d {dst_dir} -o {basename} {url}')
425
  else:
426
  parsed_link = f'\n{url}\n\tout={unquote(url.split("/")[-1])}'
427
+ get_ipython().system('echo -e "{parsed_link}" | aria2c {header_option} {aria2_args} -i- -d "{dst_dir}" -o {basename}')
428
+
429
  # -- Other --
430
  elif 'http' in url or 'magnet' in url:
431
  if file_name:
432
+ get_ipython().system('aria2c {aria2_args} -d {dst_dir} -o {file_name} {url}')
433
  else:
434
  parsed_link = '"{}"'.format(url)
435
+ get_ipython().system('aria2c {aria2_args} -d {dst_dir} -Z {parsed_link}')
436
 
437
  def download(url):
438
  links_and_paths = url.split(',')
files_cells/python/ru/downloading_ru.py CHANGED
@@ -325,6 +325,7 @@ prefixes = {
325
  "adetailer": adetailer_dir
326
  }
327
 
 
328
  # !mkdir -p {models_dir} {vaes_dir} {loras_dir} {embeddings_dir} {extensions_dir} {control_dir} {adetailer_dir}
329
  directories = [value for key, value in prefixes.items()] # for unpacking zip files
330
  get_ipython().system('mkdir -p {" ".join(directories)}')
@@ -356,15 +357,6 @@ def extract_file_and_image_info(data):
356
  image_url = images[0].get('url', None) # get preview: first image
357
  return model_name, image_url
358
 
359
- def modify_image_url(image_url):
360
- parts = image_url.split('/')
361
- for i, part in enumerate(parts):
362
- if part.startswith('width='):
363
- width_value = int(part.split('=')[1])
364
- parts[i] = f'width={width_value * 2}' # resize for quality image
365
- break
366
- return '/'.join(parts)
367
-
368
  def generate_preview_filename(model_name, image_url):
369
  file_parts = model_name.split('.')
370
  image_format = image_url.split('.')[-1].split('?')[0]
@@ -388,6 +380,7 @@ def handle_manual(url):
388
  extension_repo.append((url, file_name))
389
 
390
  def manual_download(url, dst_dir, file_name):
 
391
  basename = url.split("/")[-1] if file_name is None else file_name
392
  header_option = f"--header={user_header}"
393
 
@@ -404,11 +397,10 @@ def manual_download(url, dst_dir, file_name):
404
  if data:
405
  model_name, image_url = extract_file_and_image_info(data)
406
  if model_name and image_url:
407
- new_image_url = modify_image_url(image_url)
408
- image_file_name = generate_preview_filename(model_name if not file_name else file_name, new_image_url)
409
- save_img_path = f"{dst_dir}/{image_file_name}"
410
- get_ipython().system('wget -O {save_img_path} {new_image_url} # download image')
411
- # print(f"\n\n\n{save_img_path, new_image_url}\n\n\n")
412
  else:
413
  print("File name or image URL missing.")
414
  else:
@@ -423,22 +415,24 @@ def manual_download(url, dst_dir, file_name):
423
  get_ipython().system('gdown "{url}" -O {dst_dir}/{file_name} --fuzzy -c')
424
  else:
425
  get_ipython().system('gdown "{url}" -O {dst_dir} --fuzzy -c')
 
426
  # -- Hugging Face --
427
  elif 'huggingface' in url:
428
  if '/blob/' in url:
429
  url = url.replace('/blob/', '/resolve/')
430
  if file_name:
431
- get_ipython().system('aria2c {header_option} --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -c -j5 -x16 -s16 -k1M -c -d {dst_dir} -o {basename} {url}')
432
  else:
433
  parsed_link = f'\n{url}\n\tout={unquote(url.split("/")[-1])}'
434
- get_ipython().system('echo -e "{parsed_link}" | aria2c {header_option} --console-log-level=error --summary-interval=10 -i- -j5 -x16 -s16 -k1M -c -d "{dst_dir}" -o {basename}')
 
435
  # -- Other --
436
  elif 'http' in url or 'magnet' in url:
437
  if file_name:
438
- get_ipython().system('aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -j5 -x16 -s16 -k1M -c -d {dst_dir} -o {file_name} {url}')
439
  else:
440
  parsed_link = '"{}"'.format(url)
441
- get_ipython().system('aria2c --optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -j5 -x16 -s16 -k1M -c -d {dst_dir} -Z {parsed_link}')
442
 
443
  def download(url):
444
  links_and_paths = url.split(',')
 
325
  "adetailer": adetailer_dir
326
  }
327
 
328
+ aria2_args = '--optimize-concurrent-downloads --console-log-level=error --summary-interval=10 -j5 -x16 -s16 -k1M -c'
329
  # !mkdir -p {models_dir} {vaes_dir} {loras_dir} {embeddings_dir} {extensions_dir} {control_dir} {adetailer_dir}
330
  directories = [value for key, value in prefixes.items()] # for unpacking zip files
331
  get_ipython().system('mkdir -p {" ".join(directories)}')
 
357
  image_url = images[0].get('url', None) # get preview: first image
358
  return model_name, image_url
359
 
 
 
 
 
 
 
 
 
 
360
  def generate_preview_filename(model_name, image_url):
361
  file_parts = model_name.split('.')
362
  image_format = image_url.split('.')[-1].split('?')[0]
 
380
  extension_repo.append((url, file_name))
381
 
382
  def manual_download(url, dst_dir, file_name):
383
+ global aria2_args
384
  basename = url.split("/")[-1] if file_name is None else file_name
385
  header_option = f"--header={user_header}"
386
 
 
397
  if data:
398
  model_name, image_url = extract_file_and_image_info(data)
399
  if model_name and image_url:
400
+ image_file_name = generate_preview_filename(model_name if not file_name else file_name, image_url)
401
+ with capture.capture_output() as cap: # clear shit
402
+ get_ipython().system('aria2c {aria2_args} -d {dst_dir} -o {image_file_name} {image_url}')
403
+ del cap
 
404
  else:
405
  print("File name or image URL missing.")
406
  else:
 
415
  get_ipython().system('gdown "{url}" -O {dst_dir}/{file_name} --fuzzy -c')
416
  else:
417
  get_ipython().system('gdown "{url}" -O {dst_dir} --fuzzy -c')
418
+
419
  # -- Hugging Face --
420
  elif 'huggingface' in url:
421
  if '/blob/' in url:
422
  url = url.replace('/blob/', '/resolve/')
423
  if file_name:
424
+ get_ipython().system('aria2c {header_option} {aria2_args} -d {dst_dir} -o {basename} {url}')
425
  else:
426
  parsed_link = f'\n{url}\n\tout={unquote(url.split("/")[-1])}'
427
+ get_ipython().system('echo -e "{parsed_link}" | aria2c {header_option} {aria2_args} -i- -d "{dst_dir}" -o {basename}')
428
+
429
  # -- Other --
430
  elif 'http' in url or 'magnet' in url:
431
  if file_name:
432
+ get_ipython().system('aria2c {aria2_args} -d {dst_dir} -o {file_name} {url}')
433
  else:
434
  parsed_link = '"{}"'.format(url)
435
+ get_ipython().system('aria2c {aria2_args} -d {dst_dir} -Z {parsed_link}')
436
 
437
  def download(url):
438
  links_and_paths = url.split(',')