|
from huggingface_hub import from_pretrained_keras |
|
from keras_cv import models |
|
import gradio as gr |
|
|
|
from tensorflow import keras |
|
|
|
from diffusers import StableDiffusionPipeline |
|
|
|
# NOTE(review): this sets the *Keras* global policy, but all inference below
# goes through the diffusers pipeline, which does not read it — presumably a
# leftover from the KerasCV version of this demo; confirm before removing.
keras.mixed_precision.set_global_policy("mixed_float16")

# Output resolution of the fine-tuned Stable Diffusion checkpoint.
resolution = 512

# DreamBooth checkpoint fine-tuned on Bored Ape images, loaded once at startup
# and moved to the GPU.
model_ckpt = "nielsgl/dreambooth-bored-ape"
pipeline = StableDiffusionPipeline.from_pretrained(model_ckpt)
pipeline = pipeline.to("cuda")

# DreamBooth unique identifier and class label baked into the prompts.
unique_id = "drawbayc"
class_label = "monkey"
prompt = f"A drawing of {unique_id} {class_label} as a cowboy"

# NOTE(review): the original ran a full 50-step generation here at import time
# (`image = pipeline(prompt, num_inference_steps=50).images[0]`) and never used
# the result — removed to avoid wasting a GPU inference on every app start.
|
|
|
|
|
def infer(prompt, negative_prompt, guidance_scale=10, num_inference_steps=50, *, pipe=None, max_attempts=10):
    """Generate four safe-for-work images for ``prompt``.

    Batches of 5 images are requested from the pipeline and any image flagged
    by the safety checker is discarded, until 4 clean images are collected.

    Args:
        prompt: Positive text prompt.
        negative_prompt: Negative prompt; an empty/falsy value is passed as None.
        guidance_scale: Classifier-free guidance scale.
        num_inference_steps: Denoising steps per generation.
        pipe: Optional pipeline override (defaults to the module-level
            ``pipeline``); keyword-only, mainly for testing.
        max_attempts: Safety bound on generation rounds — the original looped
            forever when the safety checker rejected every image.

    Returns:
        A list of up to 4 PIL images (fewer only if ``max_attempts`` rounds
        all came back flagged).
    """
    if pipe is None:
        pipe = pipeline
    neg = negative_prompt if negative_prompt else None
    imgs = []
    for _ in range(max_attempts):
        result = pipe(
            prompt,
            negative_prompt=neg,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
            num_images_per_prompt=5,
        )
        # When the safety checker is disabled, nsfw_content_detected is None —
        # treat every image as safe in that case.
        flags = result.nsfw_content_detected
        if flags is None:
            flags = [False] * len(result.images)
        imgs.extend(img for img, flagged in zip(result.images, flags) if not flagged)
        if len(imgs) >= 4:
            return imgs[:4]
    # Best effort: return whatever survived rather than spinning forever.
    return imgs[:4]
|
|
|
# NOTE(review): the original created a module-level `output = gr.Gallery(...)`
# here; it was shadowed by the Gallery built inside the Blocks layout below
# (the one actually wired to the button) and never rendered — dropped as dead
# code.

# Page copy.  `title` and `description` are not currently wired into the
# Blocks UI; kept for reference.
title = "KerasCV Stable Diffusion Demo on images of Bored Apes."
# Typo fix: "images the NFT collection" -> "images of the NFT collection".
description = "This is a dreambooth model fine-tuned on images of the NFT collection of the Bored Ape Yacht Club. To try it, input the concept with `drawbayc ape`."

# [prompt, negative_prompt, guidance_scale, num_inference_steps] rows for
# gr.Examples — must match the input order wired to the Generate button.
examples = [
    ["A drawing of a drawbayc ape dressed as a cowboy", "bad anatomy, blurry, ugly, deformed, disfigured", 12, 75],
    ["A drawing of a drawbayc ape dressed as a clown", "bad anatomy, blurry, ugly, deformed, disfigured", 12, 75],
    ["A drawing of a drawbayc ape dressed as a turtle", "bad anatomy, blurry, ugly, deformed, disfigured", 12, 75],
]

# Base URL of the example images stored in the model repo.
base_14 = "https://huggingface.co/nielsgl/dreambooth-bored-ape/resolve/main/"
|
|
|
# First half of the model card, rendered above the demo.  Plain string — the
# original used an f-string prefix with no placeholders.
model_card_1 = """
# KerasCV Stable Diffusion in Diffusers π§¨π€

DreamBooth model for the `drawbayc ape` concept trained by nielsgl on the [bayc-tiny](https://huggingface.co/datasets/nielsgl/bayc-tiny) dataset, images from this [Kaggle dataset](https://www.kaggle.com/datasets/stanleyjzheng/bored-apes-yacht-club).
It can be used by modifying the `instance_prompt`: **a drawing of drawbayc ape**

The model for this space can be found [here](https://huggingface.co/nielsgl/dreambooth-bored-ape).

## Description

The Stable Diffusion V2 pipeline contained in the corresponding repository (`nielsgl/dreambooth-bored-ape`) was created using a modified version of [this Space](https://huggingface.co/spaces/sayakpaul/convert-kerascv-sd-diffusers) for StableDiffusionV2 from KerasCV. The purpose is to convert the KerasCV Stable Diffusion weights in a way that is compatible with [Diffusers](https://github.com/huggingface/diffusers). This allows users to fine-tune using KerasCV and use the fine-tuned weights in Diffusers taking advantage of its nifty features (like [schedulers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/schedulers), [fast attention](https://huggingface.co/docs/diffusers/optimization/fp16), etc.).
This model was created as part of the Keras DreamBooth Sprint π₯. Visit the [organisation page](https://huggingface.co/keras-dreambooth) for instructions on how to take part!

## Demo

"""
|
|
|
# Second half of the model card, rendered below the demo; {base_14} resolves
# the example images hosted in the model repo.
model_card_2 = f"""
## Examples

> A drawing of drawbayc monkey dressed as an astronaut

![a drawing of drawbayc monkey dressed as an astronaut]({base_14}examples/astronaut.jpg)

> A drawing of drawbayc monkey dressed as the pope

![a drawing of drawbayc monkey dressed as the pope]({base_14}examples/pope.jpg)

## Usage with Stable Diffusion V2.1

```python
from diffusers import StableDiffusionPipeline

pipeline = StableDiffusionPipeline.from_pretrained('nielsgl/dreambooth-bored-ape')
image = pipeline("a drawing of drawbayc ape as an astronaut").images[0]
image
```

"""
|
|
|
with gr.Blocks() as demo:
    # Header: first half of the model card.
    with gr.Row():
        gr.Markdown(model_card_1)

    # Main controls: prompt inputs on the left, generated gallery on the right.
    with gr.Row():
        with gr.Column():
            pos_box = gr.Textbox(label="Positive Prompt", value="a drawing of drawbayc ape as an astronaut")
            neg_box = gr.Textbox(label="Negative Prompt", value="bad anatomy, blurry, ugly, deformed, disfigured")
            scale_box = gr.Number(label='Guidance scale', value=12)
            steps_slider = gr.Slider(label="Inference Steps", value=50)
            generate_btn = gr.Button("Generate")
        with gr.Column():
            output = gr.Gallery(label="Outputs").style(grid=(1, 2))
    generate_btn.click(infer, inputs=[pos_box, neg_box, scale_box, steps_slider], outputs=[output])

    # Pre-cached example prompts.
    with gr.Row():
        gr.Examples(examples, inputs=[pos_box, neg_box, scale_box, steps_slider], outputs=output, fn=infer, cache_examples=True)

    # Footer: second half of the model card, with an empty spacer column.
    with gr.Row():
        with gr.Column():
            gr.Markdown(model_card_2)
        with gr.Column():
            gr.Markdown(" ")

demo.queue().launch()
|
|