- files_cells/notebooks/en/auto_cleaner_en.ipynb +1 -2
- files_cells/notebooks/en/downloading_en.ipynb +42 -53
- files_cells/notebooks/en/launch_en.ipynb +0 -1
- files_cells/notebooks/ru/auto_cleaner_ru.ipynb +1 -2
- files_cells/notebooks/ru/downloading_ru.ipynb +42 -53
- files_cells/notebooks/ru/launch_ru.ipynb +0 -1
- files_cells/python/en/auto_cleaner_en.py +1 -2
- files_cells/python/en/downloading_en.py +42 -53
- files_cells/python/en/launch_en.py +0 -1
- files_cells/python/ru/auto_cleaner_ru.py +1 -2
- files_cells/python/ru/downloading_ru.py +42 -53
- files_cells/python/ru/launch_ru.py +0 -1
- modules/directory_setup.py +1 -1
files_cells/notebooks/en/auto_cleaner_en.ipynb
CHANGED
@@ -55,7 +55,6 @@
 "\"\"\" functions \"\"\"\n",
 "def clean_directory(directory):\n",
 "    deleted_files = 0\n",
-"    image_dir = directories['Images']\n",
 "\n",
 "    for root, dirs, files in os.walk(directory):\n",
 "        for file in files:\n",
@@ -63,7 +62,7 @@
 "\n",
 "            if file.endswith(\".txt\"):\n",
 "                continue\n",
-"            if file.endswith((\".safetensors\", \".pt\"
+"            if file.endswith((\".safetensors\", \".pt\", \".png\", \".jpg\", \".jpeg\")):\n",
 "                deleted_files += 1\n",
 "\n",
 "            os.remove(file_path)\n",
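Note: the cleaner can now match model weights and preview images in one pass because str.endswith accepts a tuple of suffixes. A minimal sketch of that behavior (the sample file names below are made up for illustration):

targets = (".safetensors", ".pt", ".png", ".jpg", ".jpeg")

for name in ("model.safetensors", "preview.png", "readme.txt"):
    if name.endswith(".txt"):
        continue                        # .txt files are always kept
    if name.endswith(targets):          # one tuple covers weights and images
        print(f"would delete: {name}")  # prints the two non-.txt names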
files_cells/notebooks/en/downloading_en.ipynb
CHANGED
@@ -37,6 +37,7 @@
 "UI = os.getenv('SDW_UI')\n",
 "OLD_UI = os.getenv('SDW_OLD_UI')\n",
 "\n",
+"os.chdir(root_path)\n",
 "\n",
 "# ============ loading settings V4 =============\n",
 "def load_settings(path):\n",
@@ -235,8 +236,6 @@
 "    !sed -i '521s/open=\\(False\\|True\\)/open=False/' {webui_path}/extensions/Umi-AI-Wildcards/scripts/wildcard_recursive.py # Closed accordion by default\n",
 "    # --- Encrypt-Image ---\n",
 "    !sed -i '9,37d' {webui_path}/extensions/Encrypt-Image/javascript/encrypt_images_info.js # Removes the weird text in webui\n",
-"    # --- Additional-Networks ---\n",
-"    !wget -O {webui_path}/extensions/additional-networks/scripts/metadata_editor.py {anxety_repos}/extensions/Additional-Networks/fix/metadata_editor.py # Fixing an error due to old style\n",
 "del cap\n",
 "\n",
 "\n",
@@ -255,7 +254,7 @@
 "## Downloading model and stuff | oh~ Hey! If you're freaked out by that code too, don't worry, me too!\n",
 "print(\"📦 Downloading models and stuff...\", end='')\n",
 "\n",
-"
+"extension_repo = []\n",
 "PREFIXES = {\n",
 "    \"model\": models_dir,\n",
 "    \"vae\": vaes_dir,\n",
@@ -266,31 +265,22 @@
 "    \"adetailer\": adetailer_dir,\n",
 "    \"config\": webui_path\n",
 "}\n",
-"\n",
-"extension_repo = []\n",
-"directories = [value for key, value in PREFIXES.items()] # for unpucking zip files\n",
-"!mkdir -p {\" \".join(directories)}\n",
-"\n",
-"hf_token = huggingface_token if huggingface_token else \"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\"\n",
-"user_header = f\"\\\"Authorization: Bearer {hf_token}\\\"\"\n",
+"!mkdir -p {\" \".join(PREFIXES.values())}\n",
 "\n",
 "''' Formatted Info Output '''\n",
 "\n",
-"from math import floor\n",
-"\n",
 "def center_text(text, terminal_width=45):\n",
 "    padding = (terminal_width - len(text)) // 2\n",
-"    return f\"
+"    return f\"{' ' * padding}{text}{' ' * padding}\"\n",
 "\n",
 "def format_output(url, dst_dir, file_name, image_name=None, image_url=None):\n",
 "    info = center_text(f\"[{file_name.split('.')[0]}]\")\n",
-"
+"    sep_line = '---' * 20\n",
 "\n",
-"    print(f\"\\n{
-"    print(f\"\\033[33mURL: 
+"    print(f\"\\n\\033[32m{sep_line}\\033[36;1m{info}\\033[32m{sep_line}\\033[0m\")\n",
+"    print(f\"\\033[33mURL: {url}\")\n",
 "    print(f\"\\033[33mSAVE DIR: \\033[34m{dst_dir}\")\n",
 "    print(f\"\\033[33mFILE NAME: \\033[34m{file_name}\\033[0m\")\n",
-"\n",
 "    if 'civitai' in url and image_url:\n",
 "        print(f\"\\033[32m[Preview DL]:\\033[0m {image_name} - {image_url}\\n\")\n",
 "\n",
@@ -326,7 +316,7 @@
 "        print(\"\\033[31m[Data Info]:\\033[0m Failed to retrieve data from the API.\\n\")\n",
 "        return 'None', None, None, None, None, None, None\n",
 "\n",
-"    def
+"    def get_model_info(url, data):\n",
 "        model_type = data['model']['type']\n",
 "        model_name = data['files'][0]['name']\n",
 "\n",
@@ -338,17 +328,12 @@
 "\n",
 "        return model_type, model_name\n",
 "\n",
-"    model_type, model_name = extract_model_info(url, data)\n",
-"    model_name = file_name or model_name\n",
-"\n",
 "    def get_download_url(data, model_type):\n",
 "        if any(t.lower() in model_type.lower() for t in SUPPORT_TYPES):\n",
 "            return data['files'][0]['downloadUrl']\n",
 "\n",
 "        return data['files'][1]['downloadUrl'] if 'type' in url else data['files'][0]['downloadUrl']\n",
 "\n",
-"    download_url = get_download_url(data, model_type)\n",
-"\n",
 "    def get_image_info(data, model_type, model_name):\n",
 "        if not any(t in model_type for t in SUPPORT_TYPES):\n",
 "            return None, None\n",
@@ -360,9 +345,11 @@
 "            image_extension = image_url.split('.')[-1]\n",
 "            image_name = f\"{model_name.split('.')[0]}.preview.{image_extension}\" if image_url else None\n",
 "            return image_url, image_name\n",
-"\n",
 "        return None, None\n",
 "\n",
+"    model_type, model_name = get_model_info(url, data)\n",
+"    model_name = file_name or model_name\n",
+"    download_url = get_download_url(data, model_type)\n",
 "    image_url, image_name = get_image_info(data, model_type, model_name)\n",
 "\n",
 "    return f\"{download_url}{'&' if '?' in download_url else '?'}token={CIVITAI_TOKEN}\", download_url, model_type, model_name, image_url, image_name, data\n",
@@ -388,7 +375,7 @@
 "        manual_download(url, dst_dir, file_name)\n",
 "\n",
 "    # Unpuck ZIPs Files\n",
-"    for directory in 
+"    for directory in PREFIXES.values():\n",
 "        for root, _, files in os.walk(directory):\n",
 "            for file in files:\n",
 "                if file.endswith(\".zip\"):\n",
@@ -418,7 +405,7 @@
 "        extension_repo.append((path, file_name))\n",
 "\n",
 "def manual_download(url, dst_dir, file_name, prefix=None):\n",
-"
+"    hf_header = f\"--header='Authorization: Bearer {huggingface_token}'\" if huggingface_token else \"\"\n",
 "    aria2c_header = \"--header='User-Agent: Mozilla/5.0' --allow-overwrite=true\"\n",
 "    aria2_args = \"--optimize-concurrent-downloads --console-log-level=error --summary-interval=10 --stderr=true -c -x16 -s16 -k1M -j5\"\n",
 "\n",
@@ -431,14 +418,13 @@
 "        subprocess.run(command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n",
 "\n",
 "    elif 'github' in url or \"huggingface.co\" in url:\n",
-"
+"        file_name = clean_url.split(\"/\")[-1] if file_name is None else file_name\n",
 "\n",
 "    \"\"\" Formatted info output \"\"\"\n",
-"    model_name_or_basename = file_name if file_name else basename\n",
 "    try:\n",
-"        format_output(clean_url
+"        format_output(clean_url, dst_dir, file_name, image_name, image_url)\n",
 "    except UnboundLocalError:\n",
-"        format_output(clean_url
+"        format_output(clean_url, dst_dir, file_name, None, None)\n",
 "\n",
 "    # =====================\n",
 "    def run_aria2c(url, dst_dir, file_name=None, args=\"\", header=\"\"):\n",
@@ -463,7 +449,7 @@
 "\n",
 "    # -- GitHub or Hugging Face --\n",
 "    elif 'github' in url or 'huggingface' in url:\n",
-"        run_aria2c(clean_url, dst_dir, basename, aria2_args, 
+"        run_aria2c(clean_url, dst_dir, basename, aria2_args, hf_header if 'huggingface' in url else '')\n",
 "\n",
 "    # -- Other HTTP/Sources --\n",
 "    elif 'http' in url:\n",
@@ -473,50 +459,52 @@
 "\n",
 "# Separation of merged numbers\n",
 "def split_numbers(num_str, max_num):\n",
-"
-"
-"
+"    result = []\n",
+"    i = 0\n",
+"    while i < len(num_str):\n",
+"        found = False\n",
 "        for length in range(2, 0, -1):\n",
-"        if len(
-"        part = int(
+"            if i + length <= len(num_str):\n",
+"                part = int(num_str[i:i + length])\n",
 "                if part <= max_num:\n",
-"            result
-"
-"
-"
-"
+"                    result.append(part)\n",
+"                    i += length\n",
+"                    found = True\n",
+"                    break\n",
+"        if not found:\n",
+"            break\n",
+"    return result\n",
 "\n",
 "def add_submodels(selection, num_selection, model_dict, dst_dir):\n",
-"    selected_models = []\n",
 "    if selection == \"none\":\n",
-"        return 
+"        return []\n",
+"    selected_models = []\n",
+"\n",
 "    if selection == \"ALL\":\n",
-"
-"        selected_models.extend(models)\n",
+"        selected_models = sum(model_dict.values(), [])\n",
 "    else:\n",
 "        if selection in model_dict:\n",
 "            selected_models.extend(model_dict[selection])\n",
+"\n",
 "    nums = num_selection.replace(',', ' ').split()\n",
 "    max_num = len(model_dict)\n",
-"
+"    unique_nums = set()\n",
 "\n",
 "    for num_part in nums:\n",
 "        split_nums = split_numbers(num_part, max_num)\n",
-"
-"        selected_nums.extend(split_nums)\n",
-"\n",
-"    unique_nums = list(set(selected_nums))\n",
+"        unique_nums.update(split_nums)\n",
 "\n",
 "    for num in unique_nums:\n",
 "        if 1 <= num <= max_num:\n",
-"            name = list(model_dict)[num - 1]\n",
+"            name = list(model_dict.keys())[num - 1]\n",
 "            selected_models.extend(model_dict[name])\n",
 "\n",
-"    unique_models = 
+"    unique_models = {model['name']: model for model in selected_models}.values()\n",
+"\n",
 "    for model in unique_models:\n",
 "        model['dst_dir'] = dst_dir\n",
 "\n",
-"    return unique_models\n",
+"    return list(unique_models)\n",
 "\n",
 "def handle_submodels(selection, num_selection, model_dict, dst_dir, url):\n",
 "    submodels = add_submodels(selection, num_selection, model_dict, dst_dir)\n",
@@ -526,6 +514,7 @@
 "        url += f\"{submodel['url']} {submodel['dst_dir']} {submodel['name']}, \"\n",
 "    return url\n",
 "\n",
+"url = \"\"\n",
 "url = handle_submodels(model, model_num, model_list, models_dir, url)\n",
 "url = handle_submodels(vae, vae_num, vae_list, vaes_dir, url)\n",
 "url = handle_submodels(controlnet, controlnet_num, controlnet_list, control_dir, url)\n",
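Note on the rewritten split_numbers: it walks the input string greedily, trying a two-digit chunk first and falling back to a single digit, and only accepts chunks that fit within max_num (the number of entries in the model list). A small standalone sketch of the same logic with example inputs (the inputs are illustrative, not from the notebook):

def split_numbers(num_str, max_num):
    result = []
    i = 0
    while i < len(num_str):
        found = False
        for length in range(2, 0, -1):            # prefer a two-digit chunk
            if i + length <= len(num_str):
                part = int(num_str[i:i + length])
                if part <= max_num:               # keep only numbers that exist in the list
                    result.append(part)
                    i += length
                    found = True
                    break
        if not found:                             # nothing fits -> stop parsing
            break
    return result

print(split_numbers("123", 20))   # [12, 3]
print(split_numbers("123", 5))    # [1, 2, 3]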
files_cells/notebooks/en/launch_en.ipynb
CHANGED
@@ -106,7 +106,6 @@
 "# ================= Automatic Fixing Path V3 ================\n",
 "paths_to_check = {\n",
 "    \"tagger_hf_cache_dir\": f\"{webui_path}/models/interrogators/\",\n",
-"    \"additional_networks_extra_lora_path\": f\"{webui_path}/models/Lora/\",\n",
 "    \"ad_extra_models_dir\": f\"{webui_path}/models/adetailer/\",\n",
 "    \"sd_checkpoint_hash\": \"\",\n",
 "    \"sd_model_checkpoint\": \"\",\n",
files_cells/notebooks/ru/auto_cleaner_ru.ipynb
CHANGED
@@ -60,7 +60,6 @@
 "\"\"\" functions \"\"\"\n",
 "def clean_directory(directory):\n",
 "    deleted_files = 0\n",
-"    image_dir = directories['Изображения']\n",
 "\n",
 "    for root, dirs, files in os.walk(directory):\n",
 "        for file in files:\n",
@@ -68,7 +67,7 @@
 "\n",
 "            if file.endswith(\".txt\"):\n",
 "                continue\n",
-"            if file.endswith((\".safetensors\", \".pt\"
+"            if file.endswith((\".safetensors\", \".pt\", \".png\", \".jpg\", \".jpeg\")):\n",
 "                deleted_files += 1\n",
 "\n",
 "            os.remove(file_path)\n",
files_cells/notebooks/ru/downloading_ru.ipynb
CHANGED
@@ -37,6 +37,7 @@
 "UI = os.getenv('SDW_UI')\n",
 "OLD_UI = os.getenv('SDW_OLD_UI')\n",
 "\n",
+"os.chdir(root_path)\n",
 "\n",
 "# ============ loading settings V4 =============\n",
 "def load_settings(path):\n",
@@ -235,8 +236,6 @@
 "    !sed -i '521s/open=\\(False\\|True\\)/open=False/' {webui_path}/extensions/Umi-AI-Wildcards/scripts/wildcard_recursive.py # Closed accordion by default\n",
 "    # --- Encrypt-Image ---\n",
 "    !sed -i '9,37d' {webui_path}/extensions/Encrypt-Image/javascript/encrypt_images_info.js # Removes the weird text in webui\n",
-"    # --- Additional-Networks ---\n",
-"    !wget -O {webui_path}/extensions/additional-networks/scripts/metadata_editor.py {anxety_repos}/extensions/Additional-Networks/fix/metadata_editor.py # Fixing an error due to old style\n",
 "del cap\n",
 "\n",
 "\n",
@@ -255,7 +254,7 @@
 "## Downloading model and stuff | oh~ Hey! If you're freaked out by that code too, don't worry, me too!\n",
 "print(\"📦 Скачивание моделей и прочего...\", end='')\n",
 "\n",
-"
+"extension_repo = []\n",
 "PREFIXES = {\n",
 "    \"model\": models_dir,\n",
 "    \"vae\": vaes_dir,\n",
@@ -266,31 +265,22 @@
 "    \"adetailer\": adetailer_dir,\n",
 "    \"config\": webui_path\n",
 "}\n",
-"\n",
-"extension_repo = []\n",
-"directories = [value for key, value in PREFIXES.items()] # for unpucking zip files\n",
-"!mkdir -p {\" \".join(directories)}\n",
-"\n",
-"hf_token = huggingface_token if huggingface_token else \"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\"\n",
-"user_header = f\"\\\"Authorization: Bearer {hf_token}\\\"\"\n",
+"!mkdir -p {\" \".join(PREFIXES.values())}\n",
 "\n",
 "''' Formatted Info Output '''\n",
 "\n",
-"from math import floor\n",
-"\n",
 "def center_text(text, terminal_width=45):\n",
 "    padding = (terminal_width - len(text)) // 2\n",
-"    return f\"
+"    return f\"{' ' * padding}{text}{' ' * padding}\"\n",
 "\n",
 "def format_output(url, dst_dir, file_name, image_name=None, image_url=None):\n",
 "    info = center_text(f\"[{file_name.split('.')[0]}]\")\n",
-"
+"    sep_line = '---' * 20\n",
 "\n",
-"    print(f\"\\n{
-"    print(f\"\\033[33mURL: 
+"    print(f\"\\n\\033[32m{sep_line}\\033[36;1m{info}\\033[32m{sep_line}\\033[0m\")\n",
+"    print(f\"\\033[33mURL: {url}\")\n",
 "    print(f\"\\033[33mSAVE DIR: \\033[34m{dst_dir}\")\n",
 "    print(f\"\\033[33mFILE NAME: \\033[34m{file_name}\\033[0m\")\n",
-"\n",
 "    if 'civitai' in url and image_url:\n",
 "        print(f\"\\033[32m[Preview DL]:\\033[0m {image_name} - {image_url}\\n\")\n",
 "\n",
@@ -326,7 +316,7 @@
 "        print(\"\\033[31m[Data Info]:\\033[0m Failed to retrieve data from the API.\\n\")\n",
 "        return 'None', None, None, None, None, None, None\n",
 "\n",
-"    def
+"    def get_model_info(url, data):\n",
 "        model_type = data['model']['type']\n",
 "        model_name = data['files'][0]['name']\n",
 "\n",
@@ -338,17 +328,12 @@
 "\n",
 "        return model_type, model_name\n",
 "\n",
-"    model_type, model_name = extract_model_info(url, data)\n",
-"    model_name = file_name or model_name\n",
-"\n",
 "    def get_download_url(data, model_type):\n",
 "        if any(t.lower() in model_type.lower() for t in SUPPORT_TYPES):\n",
 "            return data['files'][0]['downloadUrl']\n",
 "\n",
 "        return data['files'][1]['downloadUrl'] if 'type' in url else data['files'][0]['downloadUrl']\n",
 "\n",
-"    download_url = get_download_url(data, model_type)\n",
-"\n",
 "    def get_image_info(data, model_type, model_name):\n",
 "        if not any(t in model_type for t in SUPPORT_TYPES):\n",
 "            return None, None\n",
@@ -360,9 +345,11 @@
 "            image_extension = image_url.split('.')[-1]\n",
 "            image_name = f\"{model_name.split('.')[0]}.preview.{image_extension}\" if image_url else None\n",
 "            return image_url, image_name\n",
-"\n",
 "        return None, None\n",
 "\n",
+"    model_type, model_name = get_model_info(url, data)\n",
+"    model_name = file_name or model_name\n",
+"    download_url = get_download_url(data, model_type)\n",
 "    image_url, image_name = get_image_info(data, model_type, model_name)\n",
 "\n",
 "    return f\"{download_url}{'&' if '?' in download_url else '?'}token={CIVITAI_TOKEN}\", download_url, model_type, model_name, image_url, image_name, data\n",
@@ -388,7 +375,7 @@
 "        manual_download(url, dst_dir, file_name)\n",
 "\n",
 "    # Unpuck ZIPs Files\n",
-"    for directory in 
+"    for directory in PREFIXES.values():\n",
 "        for root, _, files in os.walk(directory):\n",
 "            for file in files:\n",
 "                if file.endswith(\".zip\"):\n",
@@ -418,7 +405,7 @@
 "        extension_repo.append((path, file_name))\n",
 "\n",
 "def manual_download(url, dst_dir, file_name, prefix=None):\n",
-"
+"    hf_header = f\"--header='Authorization: Bearer {huggingface_token}'\" if huggingface_token else \"\"\n",
 "    aria2c_header = \"--header='User-Agent: Mozilla/5.0' --allow-overwrite=true\"\n",
 "    aria2_args = \"--optimize-concurrent-downloads --console-log-level=error --summary-interval=10 --stderr=true -c -x16 -s16 -k1M -j5\"\n",
 "\n",
@@ -431,14 +418,13 @@
 "        subprocess.run(command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n",
 "\n",
 "    elif 'github' in url or \"huggingface.co\" in url:\n",
-"
+"        file_name = clean_url.split(\"/\")[-1] if file_name is None else file_name\n",
 "\n",
 "    \"\"\" Formatted info output \"\"\"\n",
-"    model_name_or_basename = file_name if file_name else basename\n",
 "    try:\n",
-"        format_output(clean_url
+"        format_output(clean_url, dst_dir, file_name, image_name, image_url)\n",
 "    except UnboundLocalError:\n",
-"        format_output(clean_url
+"        format_output(clean_url, dst_dir, file_name, None, None)\n",
 "\n",
 "    # =====================\n",
 "    def run_aria2c(url, dst_dir, file_name=None, args=\"\", header=\"\"):\n",
@@ -463,7 +449,7 @@
 "\n",
 "    # -- GitHub or Hugging Face --\n",
 "    elif 'github' in url or 'huggingface' in url:\n",
-"        run_aria2c(clean_url, dst_dir, basename, aria2_args, 
+"        run_aria2c(clean_url, dst_dir, basename, aria2_args, hf_header if 'huggingface' in url else '')\n",
 "\n",
 "    # -- Other HTTP/Sources --\n",
 "    elif 'http' in url:\n",
@@ -473,50 +459,52 @@
 "\n",
 "# Separation of merged numbers\n",
 "def split_numbers(num_str, max_num):\n",
-"
-"
-"
+"    result = []\n",
+"    i = 0\n",
+"    while i < len(num_str):\n",
+"        found = False\n",
 "        for length in range(2, 0, -1):\n",
-"        if len(
-"        part = int(
+"            if i + length <= len(num_str):\n",
+"                part = int(num_str[i:i + length])\n",
 "                if part <= max_num:\n",
-"            result
-"
-"
-"
-"
+"                    result.append(part)\n",
+"                    i += length\n",
+"                    found = True\n",
+"                    break\n",
+"        if not found:\n",
+"            break\n",
+"    return result\n",
 "\n",
 "def add_submodels(selection, num_selection, model_dict, dst_dir):\n",
-"    selected_models = []\n",
 "    if selection == \"none\":\n",
-"        return 
+"        return []\n",
+"    selected_models = []\n",
+"\n",
 "    if selection == \"ALL\":\n",
-"
-"        selected_models.extend(models)\n",
+"        selected_models = sum(model_dict.values(), [])\n",
 "    else:\n",
 "        if selection in model_dict:\n",
 "            selected_models.extend(model_dict[selection])\n",
+"\n",
 "    nums = num_selection.replace(',', ' ').split()\n",
 "    max_num = len(model_dict)\n",
-"
+"    unique_nums = set()\n",
 "\n",
 "    for num_part in nums:\n",
 "        split_nums = split_numbers(num_part, max_num)\n",
-"
-"        selected_nums.extend(split_nums)\n",
-"\n",
-"    unique_nums = list(set(selected_nums))\n",
+"        unique_nums.update(split_nums)\n",
 "\n",
 "    for num in unique_nums:\n",
 "        if 1 <= num <= max_num:\n",
-"            name = list(model_dict)[num - 1]\n",
+"            name = list(model_dict.keys())[num - 1]\n",
 "            selected_models.extend(model_dict[name])\n",
 "\n",
-"    unique_models = 
+"    unique_models = {model['name']: model for model in selected_models}.values()\n",
+"\n",
 "    for model in unique_models:\n",
 "        model['dst_dir'] = dst_dir\n",
 "\n",
-"    return unique_models\n",
+"    return list(unique_models)\n",
 "\n",
 "def handle_submodels(selection, num_selection, model_dict, dst_dir, url):\n",
 "    submodels = add_submodels(selection, num_selection, model_dict, dst_dir)\n",
@@ -526,6 +514,7 @@
 "        url += f\"{submodel['url']} {submodel['dst_dir']} {submodel['name']}, \"\n",
 "    return url\n",
 "\n",
+"url = \"\"\n",
 "url = handle_submodels(model, model_num, model_list, models_dir, url)\n",
 "url = handle_submodels(vae, vae_num, vae_list, vaes_dir, url)\n",
 "url = handle_submodels(controlnet, controlnet_num, controlnet_list, control_dir, url)\n",
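Note on the manual_download change above: the hard-coded Hugging Face token is gone; an Authorization header is now built only when the user supplied huggingface_token, and it is passed to aria2c only for huggingface URLs. A rough sketch of how that header ends up on the command line (the token, directory and URL below are placeholders, not values from the notebook):

huggingface_token = "hf_xxx"   # empty when the user did not provide a token

hf_header = f"--header='Authorization: Bearer {huggingface_token}'" if huggingface_token else ""
aria2_args = "--console-log-level=error -c -x16 -s16 -k1M"

url = "https://huggingface.co/some-user/some-repo/resolve/main/model.safetensors"
header = hf_header if 'huggingface' in url else ''
print(f"aria2c {header} {aria2_args} -d /content/models -o model.safetensors '{url}'")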
files_cells/notebooks/ru/launch_ru.ipynb
CHANGED
@@ -106,7 +106,6 @@
 "# ================= Automatic Fixing Path V3 ================\n",
 "paths_to_check = {\n",
 "    \"tagger_hf_cache_dir\": f\"{webui_path}/models/interrogators/\",\n",
-"    \"additional_networks_extra_lora_path\": f\"{webui_path}/models/Lora/\",\n",
 "    \"ad_extra_models_dir\": f\"{webui_path}/models/adetailer/\",\n",
 "    \"sd_checkpoint_hash\": \"\",\n",
 "    \"sd_model_checkpoint\": \"\",\n",
files_cells/python/en/auto_cleaner_en.py
CHANGED
@@ -36,7 +36,6 @@ directories = {
 """ functions """
 def clean_directory(directory):
     deleted_files = 0
-    image_dir = directories['Images']
 
     for root, dirs, files in os.walk(directory):
         for file in files:
@@ -44,7 +43,7 @@ def clean_directory(directory):
 
             if file.endswith(".txt"):
                 continue
-            if file.endswith((".safetensors", ".pt"
+            if file.endswith((".safetensors", ".pt", ".png", ".jpg", ".jpeg")):
                 deleted_files += 1
 
             os.remove(file_path)
files_cells/python/en/downloading_en.py
CHANGED
@@ -27,6 +27,7 @@ free_plan = os.getenv('FREE_PLAN')
 UI = os.getenv('SDW_UI')
 OLD_UI = os.getenv('SDW_OLD_UI')
 
+os.chdir(root_path)
 
 # ============ loading settings V4 =============
 def load_settings(path):
@@ -225,8 +226,6 @@ with capture.capture_output() as cap:
     get_ipython().system("sed -i '521s/open=\\(False\\|True\\)/open=False/' {webui_path}/extensions/Umi-AI-Wildcards/scripts/wildcard_recursive.py # Closed accordion by default")
     # --- Encrypt-Image ---
     get_ipython().system("sed -i '9,37d' {webui_path}/extensions/Encrypt-Image/javascript/encrypt_images_info.js # Removes the weird text in webui")
-    # --- Additional-Networks ---
-    get_ipython().system('wget -O {webui_path}/extensions/additional-networks/scripts/metadata_editor.py {anxety_repos}/extensions/Additional-Networks/fix/metadata_editor.py # Fixing an error due to old style')
 del cap
 
 
@@ -245,7 +244,7 @@ if commit_hash:
 ## Downloading model and stuff | oh~ Hey! If you're freaked out by that code too, don't worry, me too!
 print("📦 Downloading models and stuff...", end='')
 
-
+extension_repo = []
 PREFIXES = {
     "model": models_dir,
     "vae": vaes_dir,
@@ -256,31 +255,22 @@ PREFIXES = {
     "adetailer": adetailer_dir,
     "config": webui_path
 }
-
-extension_repo = []
-directories = [value for key, value in PREFIXES.items()] # for unpucking zip files
-get_ipython().system('mkdir -p {" ".join(directories)}')
-
-hf_token = huggingface_token if huggingface_token else "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"
-user_header = f"\"Authorization: Bearer {hf_token}\""
+get_ipython().system('mkdir -p {" ".join(PREFIXES.values())}')
 
 ''' Formatted Info Output '''
 
-from math import floor
-
 def center_text(text, terminal_width=45):
     padding = (terminal_width - len(text)) // 2
-    return f"
+    return f"{' ' * padding}{text}{' ' * padding}"
 
 def format_output(url, dst_dir, file_name, image_name=None, image_url=None):
     info = center_text(f"[{file_name.split('.')[0]}]")
-
+    sep_line = '---' * 20
 
-    print(f"\n{
-    print(f"\033[33mURL: 
+    print(f"\n\033[32m{sep_line}\033[36;1m{info}\033[32m{sep_line}\033[0m")
+    print(f"\033[33mURL: {url}")
     print(f"\033[33mSAVE DIR: \033[34m{dst_dir}")
     print(f"\033[33mFILE NAME: \033[34m{file_name}\033[0m")
-
     if 'civitai' in url and image_url:
         print(f"\033[32m[Preview DL]:\033[0m {image_name} - {image_url}\n")
 
@@ -316,7 +306,7 @@ def CivitAi_API(url, file_name=None):
         print("\033[31m[Data Info]:\033[0m Failed to retrieve data from the API.\n")
         return 'None', None, None, None, None, None, None
 
-    def
+    def get_model_info(url, data):
         model_type = data['model']['type']
         model_name = data['files'][0]['name']
 
@@ -328,17 +318,12 @@ def CivitAi_API(url, file_name=None):
 
         return model_type, model_name
 
-    model_type, model_name = extract_model_info(url, data)
-    model_name = file_name or model_name
-
     def get_download_url(data, model_type):
         if any(t.lower() in model_type.lower() for t in SUPPORT_TYPES):
             return data['files'][0]['downloadUrl']
 
         return data['files'][1]['downloadUrl'] if 'type' in url else data['files'][0]['downloadUrl']
 
-    download_url = get_download_url(data, model_type)
-
     def get_image_info(data, model_type, model_name):
         if not any(t in model_type for t in SUPPORT_TYPES):
             return None, None
@@ -350,9 +335,11 @@ def CivitAi_API(url, file_name=None):
             image_extension = image_url.split('.')[-1]
             image_name = f"{model_name.split('.')[0]}.preview.{image_extension}" if image_url else None
             return image_url, image_name
-
         return None, None
 
+    model_type, model_name = get_model_info(url, data)
+    model_name = file_name or model_name
+    download_url = get_download_url(data, model_type)
    image_url, image_name = get_image_info(data, model_type, model_name)
 
     return f"{download_url}{'&' if '?' in download_url else '?'}token={CIVITAI_TOKEN}", download_url, model_type, model_name, image_url, image_name, data
@@ -378,7 +365,7 @@ def download(url):
         manual_download(url, dst_dir, file_name)
 
     # Unpuck ZIPs Files
-    for directory in 
+    for directory in PREFIXES.values():
         for root, _, files in os.walk(directory):
             for file in files:
                 if file.endswith(".zip"):
@@ -408,7 +395,7 @@ def handle_manual(url):
         extension_repo.append((path, file_name))
 
 def manual_download(url, dst_dir, file_name, prefix=None):
-
+    hf_header = f"--header='Authorization: Bearer {huggingface_token}'" if huggingface_token else ""
     aria2c_header = "--header='User-Agent: Mozilla/5.0' --allow-overwrite=true"
     aria2_args = "--optimize-concurrent-downloads --console-log-level=error --summary-interval=10 --stderr=true -c -x16 -s16 -k1M -j5"
 
@@ -421,14 +408,13 @@ def manual_download(url, dst_dir, file_name, prefix=None):
         subprocess.run(command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
 
     elif 'github' in url or "huggingface.co" in url:
-
+        file_name = clean_url.split("/")[-1] if file_name is None else file_name
 
     """ Formatted info output """
-    model_name_or_basename = file_name if file_name else basename
     try:
-        format_output(clean_url
+        format_output(clean_url, dst_dir, file_name, image_name, image_url)
     except UnboundLocalError:
-        format_output(clean_url
+        format_output(clean_url, dst_dir, file_name, None, None)
 
     # =====================
     def run_aria2c(url, dst_dir, file_name=None, args="", header=""):
@@ -453,7 +439,7 @@ def manual_download(url, dst_dir, file_name, prefix=None):
 
     # -- GitHub or Hugging Face --
     elif 'github' in url or 'huggingface' in url:
-        run_aria2c(clean_url, dst_dir, basename, aria2_args, 
+        run_aria2c(clean_url, dst_dir, basename, aria2_args, hf_header if 'huggingface' in url else '')
 
     # -- Other HTTP/Sources --
     elif 'http' in url:
@@ -463,50 +449,52 @@ def manual_download(url, dst_dir, file_name, prefix=None):
 
 # Separation of merged numbers
 def split_numbers(num_str, max_num):
-
-
-
+    result = []
+    i = 0
+    while i < len(num_str):
+        found = False
         for length in range(2, 0, -1):
-        if len(
-        part = int(
+            if i + length <= len(num_str):
+                part = int(num_str[i:i + length])
                 if part <= max_num:
-            result
-
-
-
-
+                    result.append(part)
+                    i += length
+                    found = True
+                    break
+        if not found:
+            break
+    return result
 
 def add_submodels(selection, num_selection, model_dict, dst_dir):
-    selected_models = []
     if selection == "none":
-        return 
+        return []
+    selected_models = []
+
     if selection == "ALL":
-
-        selected_models.extend(models)
+        selected_models = sum(model_dict.values(), [])
     else:
         if selection in model_dict:
             selected_models.extend(model_dict[selection])
+
     nums = num_selection.replace(',', ' ').split()
     max_num = len(model_dict)
-
+    unique_nums = set()
 
    for num_part in nums:
        split_nums = split_numbers(num_part, max_num)
-
-        selected_nums.extend(split_nums)
-
-    unique_nums = list(set(selected_nums))
+        unique_nums.update(split_nums)
 
     for num in unique_nums:
         if 1 <= num <= max_num:
-            name = list(model_dict)[num - 1]
+            name = list(model_dict.keys())[num - 1]
             selected_models.extend(model_dict[name])
 
-    unique_models = 
+    unique_models = {model['name']: model for model in selected_models}.values()
+
     for model in unique_models:
         model['dst_dir'] = dst_dir
 
-    return unique_models
+    return list(unique_models)
 
 def handle_submodels(selection, num_selection, model_dict, dst_dir, url):
     submodels = add_submodels(selection, num_selection, model_dict, dst_dir)
@@ -516,6 +504,7 @@ def handle_submodels(selection, num_selection, model_dict, dst_dir, url):
         url += f"{submodel['url']} {submodel['dst_dir']} {submodel['name']}, "
     return url
 
+url = ""
 url = handle_submodels(model, model_num, model_list, models_dir, url)
 url = handle_submodels(vae, vae_num, vae_list, vaes_dir, url)
 url = handle_submodels(controlnet, controlnet_num, controlnet_list, control_dir, url)
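Side note on add_submodels: duplicate selections are now dropped by keying a dict on each model's name (later duplicates overwrite earlier ones and insertion order is preserved), and the surviving entries then get their dst_dir set. A tiny illustration with made-up entries:

selected_models = [
    {'name': 'A.safetensors', 'url': 'https://example.com/a'},
    {'name': 'B.safetensors', 'url': 'https://example.com/b'},
    {'name': 'A.safetensors', 'url': 'https://example.com/a'},   # duplicate selection
]

unique_models = {model['name']: model for model in selected_models}.values()
print([m['name'] for m in unique_models])   # ['A.safetensors', 'B.safetensors']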
files_cells/python/en/launch_en.py
CHANGED
@@ -82,7 +82,6 @@ clear_output()
 # ================= Automatic Fixing Path V3 ================
 paths_to_check = {
     "tagger_hf_cache_dir": f"{webui_path}/models/interrogators/",
-    "additional_networks_extra_lora_path": f"{webui_path}/models/Lora/",
     "ad_extra_models_dir": f"{webui_path}/models/adetailer/",
     "sd_checkpoint_hash": "",
     "sd_model_checkpoint": "",
files_cells/python/ru/auto_cleaner_ru.py
CHANGED
@@ -36,7 +36,6 @@ directories = {
 """ functions """
 def clean_directory(directory):
     deleted_files = 0
-    image_dir = directories['Изображения']
 
     for root, dirs, files in os.walk(directory):
         for file in files:
@@ -44,7 +43,7 @@ def clean_directory(directory):
 
             if file.endswith(".txt"):
                 continue
-            if file.endswith((".safetensors", ".pt"
+            if file.endswith((".safetensors", ".pt", ".png", ".jpg", ".jpeg")):
                 deleted_files += 1
 
             os.remove(file_path)
files_cells/python/ru/downloading_ru.py
CHANGED
@@ -27,6 +27,7 @@ free_plan = os.getenv('FREE_PLAN')
|
|
27 |
UI = os.getenv('SDW_UI')
|
28 |
OLD_UI = os.getenv('SDW_OLD_UI')
|
29 |
|
|
|
30 |
|
31 |
# ============ loading settings V4 =============
|
32 |
def load_settings(path):
|
@@ -225,8 +226,6 @@ with capture.capture_output() as cap:
|
|
225 |
get_ipython().system("sed -i '521s/open=\\(False\\|True\\)/open=False/' {webui_path}/extensions/Umi-AI-Wildcards/scripts/wildcard_recursive.py # Closed accordion by default")
|
226 |
# --- Encrypt-Image ---
|
227 |
get_ipython().system("sed -i '9,37d' {webui_path}/extensions/Encrypt-Image/javascript/encrypt_images_info.js # Removes the weird text in webui")
|
228 |
-
# --- Additional-Networks ---
|
229 |
-
get_ipython().system('wget -O {webui_path}/extensions/additional-networks/scripts/metadata_editor.py {anxety_repos}/extensions/Additional-Networks/fix/metadata_editor.py # Fixing an error due to old style')
|
230 |
del cap
|
231 |
|
232 |
|
@@ -245,7 +244,7 @@ if commit_hash:
|
|
245 |
## Downloading model and stuff | oh~ Hey! If you're freaked out by that code too, don't worry, me too!
|
246 |
print("π¦ Π‘ΠΊΠ°ΡΠΈΠ²Π°Π½ΠΈΠ΅ ΠΌΠΎΠ΄Π΅Π»Π΅ΠΉ ΠΈ ΠΏΡΠΎΡΠ΅Π³ΠΎ...", end='')
|
247 |
|
248 |
-
|
249 |
PREFIXES = {
|
250 |
"model": models_dir,
|
251 |
"vae": vaes_dir,
|
@@ -256,31 +255,22 @@ PREFIXES = {
|
|
256 |
"adetailer": adetailer_dir,
|
257 |
"config": webui_path
|
258 |
}
|
259 |
-
|
260 |
-
extension_repo = []
|
261 |
-
directories = [value for key, value in PREFIXES.items()] # for unpucking zip files
|
262 |
-
get_ipython().system('mkdir -p {" ".join(directories)}')
|
263 |
-
|
264 |
-
hf_token = huggingface_token if huggingface_token else "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"
|
265 |
-
user_header = f"\"Authorization: Bearer {hf_token}\""
|
266 |
|
267 |
''' Formatted Info Output '''
|
268 |
|
269 |
-
from math import floor
|
270 |
-
|
271 |
def center_text(text, terminal_width=45):
|
272 |
padding = (terminal_width - len(text)) // 2
|
273 |
-
return f"
|
274 |
|
275 |
def format_output(url, dst_dir, file_name, image_name=None, image_url=None):
|
276 |
info = center_text(f"[{file_name.split('.')[0]}]")
|
277 |
-
|
278 |
|
279 |
-
print(f"\n{
|
280 |
-
print(f"\033[33mURL:
|
281 |
print(f"\033[33mSAVE DIR: \033[34m{dst_dir}")
|
282 |
print(f"\033[33mFILE NAME: \033[34m{file_name}\033[0m")
|
283 |
-
|
284 |
if 'civitai' in url and image_url:
|
285 |
print(f"\033[32m[Preview DL]:\033[0m {image_name} - {image_url}\n")
|
286 |
|
@@ -316,7 +306,7 @@ def CivitAi_API(url, file_name=None):
|
|
316 |
print("\033[31m[Data Info]:\033[0m Failed to retrieve data from the API.\n")
|
317 |
return 'None', None, None, None, None, None, None
|
318 |
|
319 |
-
def
|
320 |
model_type = data['model']['type']
|
321 |
model_name = data['files'][0]['name']
|
322 |
|
@@ -328,17 +318,12 @@ def CivitAi_API(url, file_name=None):
 
         return model_type, model_name
 
-    model_type, model_name = extract_model_info(url, data)
-    model_name = file_name or model_name
-
     def get_download_url(data, model_type):
         if any(t.lower() in model_type.lower() for t in SUPPORT_TYPES):
             return data['files'][0]['downloadUrl']
 
         return data['files'][1]['downloadUrl'] if 'type' in url else data['files'][0]['downloadUrl']
 
-    download_url = get_download_url(data, model_type)
-
     def get_image_info(data, model_type, model_name):
         if not any(t in model_type for t in SUPPORT_TYPES):
             return None, None
@@ -350,9 +335,11 @@ def CivitAi_API(url, file_name=None):
             image_extension = image_url.split('.')[-1]
             image_name = f"{model_name.split('.')[0]}.preview.{image_extension}" if image_url else None
             return image_url, image_name
-
         return None, None
 
+    model_type, model_name = get_model_info(url, data)
+    model_name = file_name or model_name
+    download_url = get_download_url(data, model_type)
     image_url, image_name = get_image_info(data, model_type, model_name)
 
     return f"{download_url}{'&' if '?' in download_url else '?'}token={CIVITAI_TOKEN}", download_url, model_type, model_name, image_url, image_name, data
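For reference, the return statement above appends the CivitAI token with '&' or '?' depending on whether the download URL already carries a query string. A small sketch of just that branch (the URLs and token below are dummy values):

    CIVITAI_TOKEN = "dummy-token"  # placeholder, not a real token

    def with_token(download_url):
        # '&' if the URL already has query parameters, otherwise start the query with '?'
        return f"{download_url}{'&' if '?' in download_url else '?'}token={CIVITAI_TOKEN}"

    print(with_token("https://civitai.com/api/download/models/12345"))
    # -> https://civitai.com/api/download/models/12345?token=dummy-token
    print(with_token("https://civitai.com/api/download/models/12345?type=Model&format=SafeTensor"))
    # -> https://civitai.com/api/download/models/12345?type=Model&format=SafeTensor&token=dummy-token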
@@ -378,7 +365,7 @@ def download(url):
         manual_download(url, dst_dir, file_name)
 
     # Unpuck ZIPs Files
-    for directory in directories:
+    for directory in PREFIXES.values():
         for root, _, files in os.walk(directory):
             for file in files:
                 if file.endswith(".zip"):
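The loop above only walks the prefix directories looking for .zip archives; the extraction step itself sits outside this hunk. A minimal sketch of what such a walk-and-extract pass could look like, assuming the standard zipfile module (extracting next to the archive and deleting it afterwards are assumptions for illustration, not the notebook's exact commands):

    import os
    import zipfile

    def unpack_zips(directories):
        """Walk each directory and unpack any .zip archive found in it."""
        for directory in directories:
            for root, _, files in os.walk(directory):
                for file in files:
                    if not file.endswith(".zip"):
                        continue
                    archive = os.path.join(root, file)
                    with zipfile.ZipFile(archive) as zf:
                        zf.extractall(root)   # assumption: extract alongside the archive
                    os.remove(archive)        # assumption: drop the archive afterwards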
@@ -408,7 +395,7 @@ def handle_manual(url):
         extension_repo.append((path, file_name))
 
 def manual_download(url, dst_dir, file_name, prefix=None):
-
+    hf_header = f"--header='Authorization: Bearer {huggingface_token}'" if huggingface_token else ""
     aria2c_header = "--header='User-Agent: Mozilla/5.0' --allow-overwrite=true"
     aria2_args = "--optimize-concurrent-downloads --console-log-level=error --summary-interval=10 --stderr=true -c -x16 -s16 -k1M -j5"
 
@@ -421,14 +408,13 @@ def manual_download(url, dst_dir, file_name, prefix=None):
         subprocess.run(command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
 
     elif 'github' in url or "huggingface.co" in url:
-
+        file_name = clean_url.split("/")[-1] if file_name is None else file_name
 
     """ Formatted info output """
-    model_name_or_basename = file_name if file_name else basename
     try:
-        format_output(clean_url
+        format_output(clean_url, dst_dir, file_name, image_name, image_url)
     except UnboundLocalError:
-        format_output(clean_url
+        format_output(clean_url, dst_dir, file_name, None, None)
 
     # =====================
     def run_aria2c(url, dst_dir, file_name=None, args="", header=""):
@@ -453,7 +439,7 @@ def manual_download(url, dst_dir, file_name, prefix=None):
 
     # -- GitHub or Hugging Face --
     elif 'github' in url or 'huggingface' in url:
-        run_aria2c(clean_url, dst_dir, basename, aria2_args,
+        run_aria2c(clean_url, dst_dir, basename, aria2_args, hf_header if 'huggingface' in url else '')
 
     # -- Other HTTP/Sources --
     elif 'http' in url:
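With the two hunks above, the Authorization header is built once in manual_download and passed to run_aria2c only for Hugging Face URLs; GitHub downloads get an empty header. A small sketch of that conditional (the token value below is a placeholder):

    huggingface_token = "hf_xxx"  # placeholder, stands in for the token from settings

    # Built once per manual_download() call; empty when no token is configured
    hf_header = f"--header='Authorization: Bearer {huggingface_token}'" if huggingface_token else ""

    def pick_header(url):
        # Only Hugging Face downloads get the Authorization header
        return hf_header if 'huggingface' in url else ''

    print(pick_header("https://huggingface.co/some/repo/resolve/main/model.safetensors"))
    print(repr(pick_header("https://github.com/user/repo/releases/download/v1/file.zip")))  # -> ''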
@@ -463,50 +449,52 @@ def manual_download(url, dst_dir, file_name, prefix=None):
 
 # Separation of merged numbers
 def split_numbers(num_str, max_num):
-
-
-
+    result = []
+    i = 0
+    while i < len(num_str):
+        found = False
         for length in range(2, 0, -1):
-            if len(
-            part = int(
+            if i + length <= len(num_str):
+                part = int(num_str[i:i + length])
                 if part <= max_num:
-                    result
-
-
-
-
+                    result.append(part)
+                    i += length
+                    found = True
+                    break
+        if not found:
+            break
+    return result
 
 def add_submodels(selection, num_selection, model_dict, dst_dir):
-    selected_models = []
     if selection == "none":
-        return
+        return []
+    selected_models = []
+
     if selection == "ALL":
-
-        selected_models.extend(models)
+        selected_models = sum(model_dict.values(), [])
     else:
         if selection in model_dict:
             selected_models.extend(model_dict[selection])
+
     nums = num_selection.replace(',', ' ').split()
     max_num = len(model_dict)
-
+    unique_nums = set()
 
     for num_part in nums:
         split_nums = split_numbers(num_part, max_num)
-
-        selected_nums.extend(split_nums)
-
-        unique_nums = list(set(selected_nums))
+        unique_nums.update(split_nums)
 
     for num in unique_nums:
         if 1 <= num <= max_num:
-            name = list(model_dict)[num - 1]
+            name = list(model_dict.keys())[num - 1]
             selected_models.extend(model_dict[name])
 
-    unique_models =
+    unique_models = {model['name']: model for model in selected_models}.values()
+
     for model in unique_models:
         model['dst_dir'] = dst_dir
 
-    return unique_models
+    return list(unique_models)
 
 def handle_submodels(selection, num_selection, model_dict, dst_dir, url):
     submodels = add_submodels(selection, num_selection, model_dict, dst_dir)
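The rewritten split_numbers above walks the digit string greedily, preferring two-digit chunks as long as they stay within max_num, and add_submodels now collects the chunks into a set and deduplicates the selected models by name. A few worked examples using the function as reconstructed above (the model entries are invented for illustration):

    def split_numbers(num_str, max_num):
        result = []
        i = 0
        while i < len(num_str):
            found = False
            for length in range(2, 0, -1):
                if i + length <= len(num_str):
                    part = int(num_str[i:i + length])
                    if part <= max_num:
                        result.append(part)
                        i += length
                        found = True
                        break
            if not found:
                break
        return result

    print(split_numbers("123", 20))  # -> [12, 3]   "12" fits under 20, then "3"
    print(split_numbers("123", 9))   # -> [1, 2, 3] two-digit chunks exceed 9, so single digits

    # Deduplication used by add_submodels: the last entry wins per 'name'
    models = [{"name": "a.safetensors", "url": "u1"}, {"name": "a.safetensors", "url": "u2"}]
    print(list({m["name"]: m for m in models}.values()))  # -> [{'name': 'a.safetensors', 'url': 'u2'}]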
@@ -516,6 +504,7 @@ def handle_submodels(selection, num_selection, model_dict, dst_dir, url):
         url += f"{submodel['url']} {submodel['dst_dir']} {submodel['name']}, "
     return url
 
+url = ""
 url = handle_submodels(model, model_num, model_list, models_dir, url)
 url = handle_submodels(vae, vae_num, vae_list, vaes_dir, url)
 url = handle_submodels(controlnet, controlnet_num, controlnet_list, control_dir, url)
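Initializing url = "" before the handle_submodels chain matters because each call appends "<link> <dst_dir> <name>, " for every selected submodel and returns the grown string. A toy end-to-end sketch with a simplified signature (the real function also takes the selection arguments; the entries and paths below are invented for illustration):

    def handle_submodels(submodels, url):
        # Simplified: assume the selection has already been resolved to a list of model dicts
        for submodel in submodels:
            url += f"{submodel['url']} {submodel['dst_dir']} {submodel['name']}, "
        return url

    models = [{"url": "https://example.com/a.safetensors", "dst_dir": "/tmp/models", "name": "a.safetensors"}]
    vaes = [{"url": "https://example.com/b.vae.pt", "dst_dir": "/tmp/vae", "name": "b.vae.pt"}]

    url = ""
    url = handle_submodels(models, url)
    url = handle_submodels(vaes, url)
    print(url)
    # -> https://example.com/a.safetensors /tmp/models a.safetensors, https://example.com/b.vae.pt /tmp/vae b.vae.pt, 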
files_cells/python/ru/launch_ru.py
CHANGED
@@ -82,7 +82,6 @@ clear_output()
 # ================= Automatic Fixing Path V3 ================
 paths_to_check = {
     "tagger_hf_cache_dir": f"{webui_path}/models/interrogators/",
-    "additional_networks_extra_lora_path": f"{webui_path}/models/Lora/",
     "ad_extra_models_dir": f"{webui_path}/models/adetailer/",
     "sd_checkpoint_hash": "",
     "sd_model_checkpoint": "",
modules/directory_setup.py
CHANGED
@@ -2,7 +2,7 @@ import os
 
 webui_path = os.getenv('WEBUI_PATH')
 
-output_dir = f"{webui_path}/
+output_dir = f"{webui_path}/outputs"
 models_dir = f"{webui_path}/models/Stable-diffusion"
 vaes_dir = f"{webui_path}/models/VAE"
 embeddings_dir = f"{webui_path}/embeddings"