dong625 committed on
Commit
5dec311
1 Parent(s): 2216e77

Update fooocus_launch.py

Browse files
Files changed (1) hide show
  1. fooocus_launch.py +57 -42
fooocus_launch.py CHANGED
@@ -2,6 +2,8 @@ import os
2
  import ssl
3
  import sys
4
 
 
 
5
  root = os.path.dirname(os.path.abspath(__file__))
6
  sys.path.append(root)
7
  os.chdir(root)
@@ -9,7 +11,7 @@ os.chdir(root)
9
  os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
10
  os.environ["PYTORCH_MPS_HIGH_WATERMARK_RATIO"] = "0.0"
11
  if "GRADIO_SERVER_PORT" not in os.environ:
12
- os.environ["GRADIO_SERVER_PORT"] = "6666"
13
 
14
  ssl._create_default_https_context = ssl._create_unverified_context
15
 
@@ -53,16 +55,53 @@ def prepare_environment():
53
 
54
  if REINSTALL_ALL or not requirements_met(requirements_file):
55
  run_pip(f"install -r \"{requirements_file}\"", "requirements")
 
56
  return
57
 
 
58
  vae_approx_filenames = [
59
- ('xlvaeapp.pth', 'https://hf-mirror.com/lllyasviel/misc/resolve/main/xlvaeapp.pth'),
60
- ('vaeapp_sd15.pth', 'https://hf-mirror.com/lllyasviel/misc/resolve/main/vaeapp_sd15.pt'),
61
- ('xl-to-v1_interposer-v3.1.safetensors',
62
- 'https://hf-mirror.com/lllyasviel/misc/resolve/main/xl-to-v1_interposer-v3.1.safetensors')
63
  ]
64
 
65
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
66
  def ini_args():
67
  from args_manager import args
68
  return args
@@ -76,6 +115,10 @@ if args.gpu_device_id is not None:
76
  os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_device_id)
77
  print("Set device to:", args.gpu_device_id)
78
 
 
 
 
 
79
  from modules import config
80
 
81
  os.environ['GRADIO_TEMP_DIR'] = config.temp_path
@@ -84,20 +127,21 @@ if config.temp_path_cleanup_on_launch:
84
  print(f'[Cleanup] Attempting to delete content of temp dir {config.temp_path}')
85
  result = delete_folder_content(config.temp_path, '[Cleanup] ')
86
  if result:
87
- print("[Cleanup] Cleanup successful")
88
  else:
89
  print(f"[Cleanup] Failed to delete content of temp dir.")
90
 
 
91
  def download_models(default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads):
92
  for file_name, url in vae_approx_filenames:
93
  load_file_from_url(url=url, model_dir=config.path_vae_approx, file_name=file_name)
94
 
95
  load_file_from_url(
96
- url='https://hf-mirror.com/lllyasviel/misc/resolve/main/fooocus_expansion.bin',
97
  model_dir=config.path_fooocus_expansion,
98
  file_name='pytorch_model.bin'
99
  )
100
-
101
  if args.disable_preset_download:
102
  print('Skipped model download.')
103
  return default_model, checkpoint_downloads
@@ -113,49 +157,20 @@ def download_models(default_model, previous_default_models, checkpoint_downloads
113
  checkpoint_downloads = {}
114
  default_model = alternative_model_name
115
  break
 
116
  for file_name, url in checkpoint_downloads.items():
117
  load_file_from_url(url=url, model_dir=config.paths_checkpoints[0], file_name=file_name)
118
  for file_name, url in embeddings_downloads.items():
119
  load_file_from_url(url=url, model_dir=config.path_embeddings, file_name=file_name)
120
  for file_name, url in lora_downloads.items():
121
  load_file_from_url(url=url, model_dir=config.paths_loras[0], file_name=file_name)
 
122
  return default_model, checkpoint_downloads
123
 
 
124
  config.default_base_model_name, config.checkpoint_downloads = download_models(
125
  config.default_base_model_name, config.previous_default_models, config.checkpoint_downloads,
126
  config.embeddings_downloads, config.lora_downloads)
127
 
128
  from webui import *
129
- import requests
130
- def get_daily_sentence(types=['a']):
131
- params = {'c': types}
132
- response = requests.get("https://v1.hitokoto.cn/", params=params)
133
- if response.status_code == 200:
134
- data = response.json()
135
- return "\033[36m\033[4m" + data["hitokoto"] + "\033[0m"
136
- else:
137
- return "\033[36m\033[4m静待花开会有时,守得云开见月明\033[0m"
138
- daily_sentence = get_daily_sentence(['i'])
139
- print(daily_sentence)
140
- def count_execution():
141
- if not os.path.exists("/mnt/workspace/Fooocus/count.txt"):
142
- with open("/mnt/workspace/Fooocus/count.txt", "w") as f:
143
- f.write("0")
144
- with open("/mnt/workspace/Fooocus/count.txt", "r") as f:
145
- count = int(f.read())
146
- count += 1
147
- with open("/mnt/workspace/Fooocus/count.txt", "w") as f:
148
- f.write(str(count))
149
- return count
150
- count=count_execution()
151
- if(count<6):
152
- print(f"这是您在本实例中第{count}次启动Fooocus")
153
- print("👇请点击下方蓝色的\033[34m http:\\\\127.0.0.1:6666 \033[0m 打开Fooocus界面👇")
154
- if(count>5):
155
- print(daily_sentence)
156
- print(f"这是您在本实例中第{count}次启动Fooocus")
157
- print("Fooocus服务启动中,如在使用过程中有任何问题,请联系VX:H917724495")
158
- if os.path.exists("/mnt/workspace/Untitled.ipynb"):
159
- os.remove("/mnt/workspace/Untitled.ipynb")
160
- if os.path.exists("/mnt/workspace/不会用点我.jpg"):
161
- os.remove("/mnt/workspace/不会用点我.jpg")
 
import ssl
import sys

# Echo the raw command line to aid debugging of launch options.
print('[System ARGV] ' + str(sys.argv))

# Anchor module resolution and the working directory at the repository root
# so relative paths inside the project resolve consistently.
root = os.path.dirname(os.path.abspath(__file__))
sys.path.append(root)
os.chdir(root)

# PyTorch MPS backend tweaks: fall back to CPU for unsupported ops and
# disable the high-watermark memory ratio cap.
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
os.environ["PYTORCH_MPS_HIGH_WATERMARK_RATIO"] = "0.0"
# Honour a caller-provided Gradio port; otherwise default to 7865.
os.environ.setdefault("GRADIO_SERVER_PORT", "7865")

# Disable TLS certificate verification on the default HTTPS context.
# NOTE(review): presumably to tolerate proxied/self-signed download hosts —
# confirm this is intentional, as it weakens transport security globally.
ssl._create_default_https_context = ssl._create_unverified_context
17
 
 
55
 
56
  if REINSTALL_ALL or not requirements_met(requirements_file):
57
  run_pip(f"install -r \"{requirements_file}\"", "requirements")
58
+
59
  return
60
 
61
+
62
# (file_name, download_url) pairs for the VAE-approximation helper models
# fetched at startup. NOTE(review): consumed by download_models, which saves
# them under config.path_vae_approx — confirm against that caller.
vae_approx_filenames = [
    ('xlvaeapp.pth',
     'https://huggingface.co/lllyasviel/misc/resolve/main/xlvaeapp.pth'),
    ('vaeapp_sd15.pth',
     'https://huggingface.co/lllyasviel/misc/resolve/main/vaeapp_sd15.pt'),
    ('xl-to-v1_interposer-v4.0.safetensors',
     'https://huggingface.co/mashb1t/misc/resolve/main/xl-to-v1_interposer-v4.0.safetensors'),
]
68
 
69
def dong625():
    """Print a startup banner with a launch counter and a daily quote.

    Side effects:
      * GETs https://v1.hitokoto.cn/ for a "hitokoto" quote (best-effort).
      * Reads/bumps a launch counter at /mnt/workspace/Fooocus/count.txt.
      * Deletes two helper files under /mnt/workspace if present.

    Never raises on network or counter-file problems: a failed quote fetch
    falls back to a fixed sentence, and a corrupted counter resets to 0.
    """
    import requests

    def get_daily_sentence(types=None):
        # Best-effort fetch of a quote. `types=None` avoids a mutable
        # default argument; None means the API default category 'a'.
        params = {'c': types if types is not None else ['a']}
        fallback = " \033[36m\033[4m静待花开会有时,守得云开见月明\033[0m"
        try:
            # Timeout so a slow/unreachable quote service cannot hang launch.
            response = requests.get("https://v1.hitokoto.cn/",
                                    params=params, timeout=5)
            if response.status_code == 200:
                data = response.json()
                return "\033[36m\033[4m" + data["hitokoto"] + "\033[0m"
        except (requests.RequestException, ValueError, KeyError):
            # Network error, non-JSON body, or missing key: use the fallback
            # instead of crashing the launcher.
            pass
        return fallback

    daily_sentence = get_daily_sentence(['i'])

    def count_execution():
        # Persist a per-instance launch counter; create the file on first run.
        count_file = "/mnt/workspace/Fooocus/count.txt"
        if not os.path.exists(count_file):
            with open(count_file, "w") as f:
                f.write("0")
        try:
            with open(count_file, "r") as f:
                count = int(f.read())
        except ValueError:
            # Corrupted/empty file: restart the counter rather than crash.
            count = 0
        count += 1
        with open(count_file, "w") as f:
            f.write(str(count))
        return count

    count = count_execution()
    if count < 6:
        print(f"这是您在本实例中第{count}次启动Fooocus")
    if count > 5:
        # From the 6th launch on, also show the daily quote.
        print(daily_sentence)
        print(f"这是您在本实例中第{count}次启动Fooocus")
    print("Fooocus服务启动中,如在使用过程中有任何问题,请联系VX:H917724495")

    # Remove one-time helper files shipped with the workspace image.
    if os.path.exists("/mnt/workspace/Untitled.ipynb"):
        os.remove("/mnt/workspace/Untitled.ipynb")
    if os.path.exists("/mnt/workspace/不会用点我.jpg"):
        os.remove("/mnt/workspace/不会用点我.jpg")
104
+
105
def ini_args():
    """Return the launcher's parsed command-line arguments.

    The import is deferred to call time so argument parsing (and whatever
    side effects args_manager performs) only happens when needed.
    """
    from args_manager import args as parsed_args
    return parsed_args
 
115
  os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_device_id)
116
  print("Set device to:", args.gpu_device_id)
117
 
118
# Publish the optional Hugging Face mirror URL via the HF_MIRROR environment
# variable. NOTE(review): the consumer of HF_MIRROR is elsewhere — confirm.
if args.hf_mirror is not None:
    mirror = str(args.hf_mirror)
    os.environ['HF_MIRROR'] = mirror
    print("Set hf_mirror to:", args.hf_mirror)
121
+
122
  from modules import config
123
 
124
  os.environ['GRADIO_TEMP_DIR'] = config.temp_path
 
127
  print(f'[Cleanup] Attempting to delete content of temp dir {config.temp_path}')
128
  result = delete_folder_content(config.temp_path, '[Cleanup] ')
129
  if result:
130
+ print("[Cleanup] Cleanup successful")
131
  else:
132
  print(f"[Cleanup] Failed to delete content of temp dir.")
133
 
134
+
135
  def download_models(default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads):
136
  for file_name, url in vae_approx_filenames:
137
  load_file_from_url(url=url, model_dir=config.path_vae_approx, file_name=file_name)
138
 
139
  load_file_from_url(
140
+ url='https://huggingface.co/lllyasviel/misc/resolve/main/fooocus_expansion.bin',
141
  model_dir=config.path_fooocus_expansion,
142
  file_name='pytorch_model.bin'
143
  )
144
+
145
  if args.disable_preset_download:
146
  print('Skipped model download.')
147
  return default_model, checkpoint_downloads
 
157
  checkpoint_downloads = {}
158
  default_model = alternative_model_name
159
  break
160
+
161
  for file_name, url in checkpoint_downloads.items():
162
  load_file_from_url(url=url, model_dir=config.paths_checkpoints[0], file_name=file_name)
163
  for file_name, url in embeddings_downloads.items():
164
  load_file_from_url(url=url, model_dir=config.path_embeddings, file_name=file_name)
165
  for file_name, url in lora_downloads.items():
166
  load_file_from_url(url=url, model_dir=config.paths_loras[0], file_name=file_name)
167
+
168
  return default_model, checkpoint_downloads
169
 
170
+
171
# Resolve the effective base model and the remaining checkpoint downloads
# (fetching any missing files), then write the results back onto config.
config.default_base_model_name, config.checkpoint_downloads = download_models(
    config.default_base_model_name,
    config.previous_default_models,
    config.checkpoint_downloads,
    config.embeddings_downloads,
    config.lora_downloads,
)

# NOTE(review): importing webui appears to start the UI as an import side
# effect — confirm; the startup banner helper then runs.
from webui import *

dong625()