---
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
- diffusers
- instruct-pix2pix
datasets:
- UCSC-VLAA/HQ-Edit
---
## Quick Start

Make sure to install the required libraries first:

```bash
pip install accelerate transformers
pip install git+https://github.com/huggingface/diffusers
```

Then load the pipeline and run an edit (the snippet assumes a CUDA-capable GPU, since the model is loaded in `float16` and moved to `"cuda"`):
```python
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline, EulerAncestralDiscreteScheduler
from diffusers.utils import load_image

# Guidance settings: `guidance_scale` controls how strongly the edit instruction is
# followed, `image_guidance_scale` how closely the output stays to the input image.
image_guidance_scale = 1.5
guidance_scale = 7.0

# Load the HQ-Edit InstructPix2Pix pipeline and switch to the Euler Ancestral scheduler.
model_id = "MudeHui/HQ-Edit"
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16, safety_checker=None)
pipe.to("cuda")
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)

# Download an example image and resize it to the working resolution.
resolution = 512
image = load_image(
    "https://hf.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png"
).resize((resolution, resolution))

# Run the edit and save the result.
edit_instruction = "Turn sky into a cloudy one"
edited_image = pipe(
    prompt=edit_instruction,
    image=image,
    height=resolution,
    width=resolution,
    guidance_scale=guidance_scale,
    image_guidance_scale=image_guidance_scale,
    num_inference_steps=30,
).images[0]
edited_image.save("edited_image.png")
```
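The two guidance scales trade off adherence to the edit instruction (`guidance_scale`) against faithfulness to the input image (`image_guidance_scale`), so it can help to compare a few settings before picking values. The snippet below is a minimal sketch of such a sweep; it reuses `pipe`, `image`, `edit_instruction`, and `resolution` from the quick-start code above, and the candidate values and output filename pattern are only illustrative.

```python
# Minimal sketch (not part of the original recipe): sweep both guidance scales and
# save each result so the settings can be compared side by side.
# Reuses `pipe`, `image`, `edit_instruction`, and `resolution` from the snippet above.
for igs in (1.0, 1.5, 2.0):      # image_guidance_scale candidates (illustrative)
    for gs in (5.0, 7.0, 9.0):   # guidance_scale candidates (illustrative)
        out = pipe(
            prompt=edit_instruction,
            image=image,
            height=resolution,
            width=resolution,
            guidance_scale=gs,
            image_guidance_scale=igs,
            num_inference_steps=30,
        ).images[0]
        out.save(f"edited_igs{igs}_gs{gs}.png")
```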