lllyasviel committed
Commit 2f3af37
1 Parent(s): 6c4ecfe

1.0.19 (#33)

Unlock to allow changing model.

fooocus_version.py CHANGED
@@ -1,2 +1 @@
-version = '1.0.17'
-
+version = '1.0.19'
modules/core.py CHANGED
@@ -29,6 +29,14 @@ class StableDiffusionModel:
         self.clip = clip
         self.clip_vision = clip_vision
 
+    def to_meta(self):
+        if self.unet is not None:
+            self.unet.model.to('meta')
+        if self.clip is not None:
+            self.clip.cond_stage_model.to('meta')
+        if self.vae is not None:
+            self.vae.first_stage_model.to('meta')
+
 
 @torch.no_grad()
 def load_model(ckpt_filename):
@@ -42,8 +50,8 @@ def load_lora(model, lora_filename, strength_model=1.0, strength_clip=1.0):
         return model
 
     lora = comfy.utils.load_torch_file(lora_filename, safe_load=True)
-    model.unet, model.clip = comfy.sd.load_lora_for_models(model.unet, model.clip, lora, strength_model, strength_clip)
-    return model
+    unet, clip = comfy.sd.load_lora_for_models(model.unet, model.clip, lora, strength_model, strength_clip)
+    return StableDiffusionModel(unet=unet, clip=clip, vae=model.vae, clip_vision=model.clip_vision)
 
 
 @torch.no_grad()
@@ -92,7 +100,7 @@ def get_previewer(device, latent_format):
 @torch.no_grad()
 def ksampler(model, positive, negative, latent, seed=None, steps=30, cfg=7.0, sampler_name='dpmpp_2m_sde_gpu',
              scheduler='karras', denoise=1.0, disable_noise=False, start_step=None, last_step=None,
-             force_full_denoise=False):
+             force_full_denoise=False, callback_function=None):
     # SCHEDULERS = ["normal", "karras", "exponential", "simple", "ddim_uniform"]
     # SAMPLERS = ["euler", "euler_ancestral", "heun", "dpm_2", "dpm_2_ancestral",
     #             "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu",
@@ -118,6 +126,8 @@ def ksampler(model, positive, negative, latent, seed=None, steps=30, cfg=7.0, sa
     pbar = comfy.utils.ProgressBar(steps)
 
     def callback(step, x0, x, total_steps):
+        if callback_function is not None:
+            callback_function(step, x0, x, total_steps)
         if previewer and step % 3 == 0:
             previewer.preview(x0, step, total_steps)
         pbar.update_absolute(step + 1, total_steps, None)
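
Note on the modules/core.py changes: `load_lora` now returns a fresh `StableDiffusionModel` instead of mutating its argument, so the unpatched base model stays reusable, and `to_meta()` moves weights onto PyTorch's `meta` device so an old checkpoint's memory can be reclaimed before a replacement is loaded; `callback_function` simply threads the UI progress callback into the sampler's per-step callback alongside the existing previewer. A minimal sketch of the meta-device trick, with a plain `torch.nn` module standing in for comfy's unet/clip/vae wrappers:

```python
import torch

# Stand-in for a loaded submodule (comfy's wrappers hold real nn.Modules).
model = torch.nn.Linear(4096, 4096)
print(model.weight.device)  # cpu

# Moving to the 'meta' device drops parameter storage; only shape/dtype
# metadata remains, so the old weights become garbage-collectable before
# the replacement checkpoint is loaded.
model.to('meta')
print(model.weight.device)  # meta
print(model.weight.shape)   # torch.Size([4096, 4096]) -- metadata survives
```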
modules/default_pipeline.py CHANGED
@@ -1,46 +1,146 @@
 import modules.core as core
 import os
 import torch
+import modules.path
 
-from modules.path import modelfile_path, lorafile_path
+from comfy.model_base import SDXL, SDXLRefiner
 
 
-xl_base_filename = os.path.join(modelfile_path, 'sd_xl_base_1.0_0.9vae.safetensors')
-xl_refiner_filename = os.path.join(modelfile_path, 'sd_xl_refiner_1.0_0.9vae.safetensors')
-xl_base_offset_lora_filename = os.path.join(lorafile_path, 'sd_xl_offset_example-lora_1.0.safetensors')
+xl_base: core.StableDiffusionModel = None
+xl_base_hash = ''
 
-xl_base = core.load_model(xl_base_filename)
-xl_base = core.load_lora(xl_base, xl_base_offset_lora_filename, strength_model=0.5, strength_clip=0.0)
-del xl_base.vae
+xl_refiner: core.StableDiffusionModel = None
+xl_refiner_hash = ''
 
-xl_refiner = core.load_model(xl_refiner_filename)
+xl_base_patched: core.StableDiffusionModel = None
+xl_base_patched_hash = ''
+
+
+def refresh_base_model(name):
+    global xl_base, xl_base_hash, xl_base_patched, xl_base_patched_hash
+    if xl_base_hash == str(name):
+        return
+
+    filename = os.path.join(modules.path.modelfile_path, name)
+
+    if xl_base is not None:
+        xl_base.to_meta()
+        xl_base = None
+
+    xl_base = core.load_model(filename)
+    if not isinstance(xl_base.unet.model, SDXL):
+        print('Model not supported. Fooocus only support SDXL model as the base model.')
+        xl_base = None
+        xl_base_hash = ''
+        refresh_base_model(modules.path.default_base_model_name)
+        xl_base_hash = name
+        xl_base_patched = xl_base
+        xl_base_patched_hash = ''
+        return
+
+    xl_base_hash = name
+    xl_base_patched = xl_base
+    xl_base_patched_hash = ''
+    print(f'Base model loaded: {xl_base_hash}')
+
+    return
+
+
+def refresh_refiner_model(name):
+    global xl_refiner, xl_refiner_hash
+    if xl_refiner_hash == str(name):
+        return
+
+    if name == 'None':
+        xl_refiner = None
+        xl_refiner_hash = ''
+        print(f'Refiner unloaded.')
+        return
+
+    filename = os.path.join(modules.path.modelfile_path, name)
+
+    if xl_refiner is not None:
+        xl_refiner.to_meta()
+        xl_refiner = None
+
+    xl_refiner = core.load_model(filename)
+    if not isinstance(xl_refiner.unet.model, SDXLRefiner):
+        print('Model not supported. Fooocus only support SDXL refiner as the refiner.')
+        xl_refiner = None
+        xl_refiner_hash = ''
+        print(f'Refiner unloaded.')
+        return
+
+    xl_refiner_hash = name
+    print(f'Refiner model loaded: {xl_refiner_hash}')
+
+    xl_refiner.vae.first_stage_model.to('meta')
+    xl_refiner.vae = None
+    return
+
+
+def refresh_loras(loras):
+    global xl_base, xl_base_patched, xl_base_patched_hash
+    if xl_base_patched_hash == str(loras):
+        return
+
+    model = xl_base
+    for name, weight in loras:
+        if name == 'None':
+            continue
+
+        filename = os.path.join(modules.path.lorafile_path, name)
+        model = core.load_lora(model, filename, strength_model=weight, strength_clip=weight)
+    xl_base_patched = model
+    xl_base_patched_hash = str(loras)
+    print(f'LoRAs loaded: {xl_base_patched_hash}')
+
+    return
+
+
+refresh_base_model(modules.path.default_base_model_name)
+refresh_refiner_model(modules.path.default_refiner_model_name)
+refresh_loras([(modules.path.default_lora_name, 0.5), ('None', 0.5), ('None', 0.5), ('None', 0.5), ('None', 0.5)])
 
 
 @torch.no_grad()
 def process(positive_prompt, negative_prompt, steps, switch, width, height, image_seed, callback):
-    positive_conditions = core.encode_prompt_condition(clip=xl_base.clip, prompt=positive_prompt)
-    negative_conditions = core.encode_prompt_condition(clip=xl_base.clip, prompt=negative_prompt)
-
-    positive_conditions_refiner = core.encode_prompt_condition(clip=xl_refiner.clip, prompt=positive_prompt)
-    negative_conditions_refiner = core.encode_prompt_condition(clip=xl_refiner.clip, prompt=negative_prompt)
+    positive_conditions = core.encode_prompt_condition(clip=xl_base_patched.clip, prompt=positive_prompt)
+    negative_conditions = core.encode_prompt_condition(clip=xl_base_patched.clip, prompt=negative_prompt)
 
     empty_latent = core.generate_empty_latent(width=width, height=height, batch_size=1)
 
-    sampled_latent = core.ksampler_with_refiner(
-        model=xl_base.unet,
-        positive=positive_conditions,
-        negative=negative_conditions,
-        refiner=xl_refiner.unet,
-        refiner_positive=positive_conditions_refiner,
-        refiner_negative=negative_conditions_refiner,
-        refiner_switch_step=switch,
-        latent=empty_latent,
-        steps=steps, start_step=0, last_step=steps, disable_noise=False, force_full_denoise=True,
-        seed=image_seed,
-        callback_function=callback
-    )
-
-    decoded_latent = core.decode_vae(vae=xl_refiner.vae, latent_image=sampled_latent)
+    if xl_refiner is not None:
+
+        positive_conditions_refiner = core.encode_prompt_condition(clip=xl_refiner.clip, prompt=positive_prompt)
+        negative_conditions_refiner = core.encode_prompt_condition(clip=xl_refiner.clip, prompt=negative_prompt)
+
+        sampled_latent = core.ksampler_with_refiner(
+            model=xl_base_patched.unet,
+            positive=positive_conditions,
+            negative=negative_conditions,
+            refiner=xl_refiner.unet,
+            refiner_positive=positive_conditions_refiner,
+            refiner_negative=negative_conditions_refiner,
+            refiner_switch_step=switch,
+            latent=empty_latent,
+            steps=steps, start_step=0, last_step=steps, disable_noise=False, force_full_denoise=True,
+            seed=image_seed,
+            callback_function=callback
+        )
+
+    else:
+        sampled_latent = core.ksampler(
+            model=xl_base_patched.unet,
+            positive=positive_conditions,
+            negative=negative_conditions,
+            latent=empty_latent,
+            steps=steps, start_step=0, last_step=steps, disable_noise=False, force_full_denoise=True,
+            seed=image_seed,
+            callback_function=callback
+        )
+
+    decoded_latent = core.decode_vae(vae=xl_base_patched.vae, latent_image=sampled_latent)
 
     images = core.image_to_numpy(decoded_latent)
 
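Note on modules/default_pipeline.py: the eager module-level loads are replaced by `refresh_*` functions that cache on the selected name (or `str(loras)` for the LoRA stack) and reload only when the selection changes; LoRA patching writes to `xl_base_patched` so the base checkpoint never has to be re-read when LoRAs change, and `process` falls back to plain `core.ksampler` when no refiner is selected. The guard pattern, reduced to a runnable sketch (`load_checkpoint` is a hypothetical stand-in for the expensive `core.load_model` disk load):

```python
current_model = None
current_hash = ''

def load_checkpoint(name):
    # Hypothetical stand-in for an expensive disk load.
    return {'name': name}

def refresh_model(name):
    global current_model, current_hash
    if current_hash == str(name):  # unchanged selection: skip the reload
        return current_model
    current_model = load_checkpoint(name)
    current_hash = str(name)
    return current_model

refresh_model('sd_xl_base_1.0_0.9vae.safetensors')  # loads
refresh_model('sd_xl_base_1.0_0.9vae.safetensors')  # cache hit, no reload
```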
modules/path.py CHANGED
@@ -5,3 +5,35 @@ lorafile_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../mode
 temp_outputs_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../outputs/'))
 
 os.makedirs(temp_outputs_path, exist_ok=True)
+
+default_base_model_name = 'sd_xl_base_1.0_0.9vae.safetensors'
+default_refiner_model_name = 'sd_xl_refiner_1.0_0.9vae.safetensors'
+default_lora_name = 'sd_xl_offset_example-lora_1.0.safetensors'
+default_lora_weight = 0.5
+
+model_filenames = []
+lora_filenames = []
+
+
+def get_model_filenames(folder_path):
+    if not os.path.isdir(folder_path):
+        raise ValueError("Folder path is not a valid directory.")
+
+    filenames = []
+    for filename in os.listdir(folder_path):
+        if os.path.isfile(os.path.join(folder_path, filename)):
+            _, file_extension = os.path.splitext(filename)
+            if file_extension.lower() in ['.pth', '.ckpt', '.bin', '.safetensors']:
+                filenames.append(filename)
+
+    return filenames
+
+
+def update_all_model_names():
+    global model_filenames, lora_filenames
+    model_filenames = get_model_filenames(modelfile_path)
+    lora_filenames = get_model_filenames(lorafile_path)
+    return
+
+
+update_all_model_names()
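
Note on modules/path.py: `get_model_filenames` filters a folder down to weight-file extensions, and `update_all_model_names` refreshes the module-level lists that the UI dropdowns read. A runnable check of the filtering behaviour, with the filter logic re-stated standalone and a temporary folder standing in for the real `../models` directories:

```python
import os
import tempfile

# Re-stated filter from get_model_filenames, for a self-contained demo.
def get_model_filenames(folder_path):
    if not os.path.isdir(folder_path):
        raise ValueError("Folder path is not a valid directory.")
    return [f for f in os.listdir(folder_path)
            if os.path.isfile(os.path.join(folder_path, f))
            and os.path.splitext(f)[1].lower() in ['.pth', '.ckpt', '.bin', '.safetensors']]

with tempfile.TemporaryDirectory() as d:
    for name in ['a.safetensors', 'b.ckpt', 'notes.txt']:
        open(os.path.join(d, name), 'w').close()
    print(sorted(get_model_filenames(d)))  # ['a.safetensors', 'b.ckpt']
```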
modules/sdxl_styles.py CHANGED
@@ -2,7 +2,7 @@
 
 styles = [
     {
-        "name": "sai-base",
+        "name": "None",
         "prompt": "{prompt}",
         "negative_prompt": ""
     },
@@ -529,7 +529,7 @@ styles = [
 ]
 
 styles = {k['name']: (k['prompt'], k['negative_prompt']) for k in styles}
-default_style = styles['sai-base']
+default_style = styles['None']
 style_keys = list(styles.keys())
update_log.md CHANGED
@@ -1,3 +1,7 @@
+### 1.0.19
+
+* Unlock to allow changing model.
+
 ### 1.0.17
 
 * Change default model to SDXL-1.0-vae-0.9. (This means the models will be downloaded again, but we should do it as early as possible so that all new users only need to download once. Really sorry for day-0 users. But frankly this is not too late considering that the project is just publicly available in less than 24 hours - if it has been a week then we will prefer more lightweight tricks to update.)
webui.py CHANGED
@@ -1,16 +1,23 @@
 import gradio as gr
+import modules.path
 import random
 import fooocus_version
+import modules.default_pipeline as pipeline
 
 from modules.sdxl_styles import apply_style, style_keys, aspect_ratios
-from modules.default_pipeline import process
 from modules.cv2win32 import close_all_preview, save_image
 from modules.util import generate_temp_filename
-from modules.path import temp_outputs_path
 
 
 def generate_clicked(prompt, negative_prompt, style_selction, performance_selction,
-                     aspect_ratios_selction, image_number, image_seed, progress=gr.Progress()):
+                     aspect_ratios_selction, image_number, image_seed, base_model_name, refiner_model_name,
+                     l1, w1, l2, w2, l3, w3, l4, w4, l5, w5, progress=gr.Progress()):
+
+    loras = [(l1, w1), (l2, w2), (l3, w3), (l4, w4), (l5, w5)]
+
+    pipeline.refresh_base_model(base_model_name)
+    pipeline.refresh_refiner_model(refiner_model_name)
+    pipeline.refresh_loras(loras)
 
     p_txt, n_txt = apply_style(style_selction, prompt, negative_prompt)
 
@@ -35,10 +42,10 @@ def generate_clicked(prompt, negative_prompt, style_selction, performance_selcti
         progress(float(done_steps) / float(all_steps), f'Step {step}/{total_steps} in the {i}-th Sampling')
 
     for i in range(image_number):
-        imgs = process(p_txt, n_txt, steps, switch, width, height, seed, callback=callback)
+        imgs = pipeline.process(p_txt, n_txt, steps, switch, width, height, seed, callback=callback)
 
         for x in imgs:
-            local_temp_filename = generate_temp_filename(folder=temp_outputs_path, extension='png')
+            local_temp_filename = generate_temp_filename(folder=modules.path.temp_outputs_path, extension='png')
             save_image(local_temp_filename, x)
 
         seed += 1
@@ -61,21 +68,45 @@ with block:
     with gr.Row():
         advanced_checkbox = gr.Checkbox(label='Advanced', value=False, container=False)
     with gr.Column(scale=0.5, visible=False) as right_col:
-        with gr.Tab(label='Generator Setting'):
+        with gr.Tab(label='Setting'):
             performance_selction = gr.Radio(label='Performance', choices=['Speed', 'Quality'], value='Speed')
             aspect_ratios_selction = gr.Radio(label='Aspect Ratios (width × height)', choices=list(aspect_ratios.keys()),
                                               value='1152×896')
             image_number = gr.Slider(label='Image Number', minimum=1, maximum=32, step=1, value=2)
             image_seed = gr.Number(label='Random Seed', value=-1, precision=0)
             negative_prompt = gr.Textbox(label='Negative Prompt', show_label=True, placeholder="Type prompt here.")
-        with gr.Tab(label='Image Style'):
+        with gr.Tab(label='Style'):
             style_selction = gr.Radio(show_label=False, container=True,
                                       choices=style_keys, value='cinematic-default')
+        with gr.Tab(label='Advanced'):
+            with gr.Row():
+                base_model = gr.Dropdown(label='SDXL Base Model', choices=modules.path.model_filenames, value=modules.path.default_base_model_name, show_label=True)
+                refiner_model = gr.Dropdown(label='SDXL Refiner', choices=['None'] + modules.path.model_filenames, value=modules.path.default_refiner_model_name, show_label=True)
+            with gr.Accordion(label='LoRAs', open=True):
+                lora_ctrls = []
+                for i in range(5):
+                    with gr.Row():
+                        lora_model = gr.Dropdown(label=f'SDXL LoRA {i+1}', choices=['None'] + modules.path.lora_filenames, value=modules.path.default_lora_name if i == 0 else 'None')
+                        lora_weight = gr.Slider(label='Weight', minimum=-2, maximum=2, step=0.01, value=modules.path.default_lora_weight)
+                        lora_ctrls += [lora_model, lora_weight]
+            model_refresh = gr.Button(label='Refresh', value='Refresh All Files', variant='secondary')
+
+        def model_refresh_clicked():
+            modules.path.update_all_model_names()
+            results = []
+            results += [gr.update(choices=modules.path.model_filenames), gr.update(choices=['None'] + modules.path.model_filenames)]
+            for i in range(5):
+                results += [gr.update(choices=['None'] + modules.path.lora_filenames), gr.update()]
+            return results
+
+        model_refresh.click(model_refresh_clicked, [], [base_model, refiner_model] + lora_ctrls)
+
     advanced_checkbox.change(lambda x: gr.update(visible=x), advanced_checkbox, right_col)
     ctrls = [
        prompt, negative_prompt, style_selction,
        performance_selction, aspect_ratios_selction, image_number, image_seed
     ]
+    ctrls += [base_model, refiner_model] + lora_ctrls
     run_button.click(fn=generate_clicked, inputs=ctrls, outputs=[gallery])
 
 block.launch(inbrowser=True)
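
Note on webui.py: the new `Advanced` tab appends its dropdowns and sliders to `ctrls`, so `generate_clicked` receives them positionally (hence the `l1, w1, ..., l5, w5` signature), and the refresh button's handler returns one `gr.update` per output component, in the same order as the `outputs` list passed to `click`. A minimal sketch of that wiring, in the gradio 3 style this file uses:

```python
import gradio as gr

def list_files():
    # Stand-in for modules.path.update_all_model_names() plus a disk scan.
    return ['model_a.safetensors', 'model_b.safetensors']

with gr.Blocks() as demo:
    model = gr.Dropdown(label='Model', choices=list_files())
    refresh = gr.Button(value='Refresh All Files')
    # The handler's return values map 1:1 onto the outputs list.
    refresh.click(lambda: gr.update(choices=list_files()), inputs=[], outputs=[model])

demo.launch()
```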