add model
- .gitattributes +2 -0
- README.md +49 -0
- config.json +42 -0
- diffusion_pytorch_model.bin +3 -0
- images/architecture.png +3 -0
- images/monster.png +3 -0
- images/skull_test.png +3 -0
- images/tree.png +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+diffusion_pytorch_model.bin filter=lfs diff=lfs merge=lfs -text
+*.png filter=lfs diff=lfs merge=lfs -text
README.md
CHANGED
@@ -1,3 +1,52 @@
 ---
+tags:
+- stable-diffusion
+- controlnet
+- qrcode
 license: openrail++
+language:
+- en
 ---
+
+# Controlnet QR Code Monster v1 For SD-1.5
+
+![QR code in shape of a tree, reading "https://huggingface.co/monster-labs"](images/tree.png)
+
+## Model Description
+
+This model is made to generate creative QR codes that still scan.
+Keep in mind that not every generated code will be readable, but you can try different parameters and prompts to get the desired results.
+
+You can try it in a Space ([try it here!](https://huggingface.co/spaces/monster-labs/Controlnet-QRCode-Monster-V1)) or use it locally.
+
+We're already working on v2, which is much more powerful. You can try [an early version here](https://qrcodemonster.art), or just scan the monster below!
+![QR code in shape of a blue monster, reading "https://qrcodemonster.art"](images/monster.png)
+
+## How to use
+
+- **Condition**: QR codes are passed as condition images with a module size of 16px. Use a higher error correction level to make the code easier to read (sometimes a lower level can be easier to read if it keeps the code smaller).
+
+- **Prompts**: Use a prompt to guide the QR code generation. The output depends heavily on the given prompt: some prompts blend into the QR code structure very easily, while others need careful tweaking to get good results.
+
+- **Controlnet guidance scale**: Set the controlnet guidance scale value:
+  - High values: the generated QR code will be more readable.
+  - Low values: the generated QR code will be more creative.
+
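The bullets above map directly onto the diffusers ControlNet API. The following is a minimal sketch of a text-to-image pass with this checkpoint; the repo id, base model, condition file name, prompt, and parameter values are illustrative assumptions, not settings taken from this commit.

```python
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline, UniPCMultistepScheduler
from diffusers.utils import load_image

# Assumed repo id; a local clone of this repository works the same way.
controlnet = ControlNetModel.from_pretrained(
    "monster-labs/control_v1p_sd15_qrcode_monster", torch_dtype=torch.float16
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # any SD 1.5 base checkpoint should work
    controlnet=controlnet,
    torch_dtype=torch.float16,
).to("cuda")
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)

# Condition image: a plain QR code rendered at ~16 px per module (see above).
qr_condition = load_image("qr_condition.png")  # hypothetical local file

image = pipe(
    prompt="a gothic stone sculpture, intricate details, dramatic lighting",
    image=qr_condition,
    num_inference_steps=30,
    # Higher -> more readable, lower -> more creative (see the guidance scale bullet).
    controlnet_conditioning_scale=1.3,
).images[0]
image.save("qr_monster_out.png")
```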
+### Tips
+
+- You might need to generate multiple QR codes with the same parameters to get a readable output (our new model greatly improves this).
+
+- Use the Image-to-Image feature to improve the readability of a QR code:
+  - Decrease the denoising strength to retain more of the original image.
+  - Increase the controlnet guidance scale value for better readability.
+
+  A typical workflow for "saving" a code: max out the guidance scale and minimize the denoising strength, then bump the strength until the code scans (see the sketch just below).
+
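As a sketch of that "saving" workflow, the snippet below feeds a creative-but-unreadable output back through diffusers' ControlNet img2img pipeline. The repo ids, file names, and the starting strength and scale values are assumptions to tune, not values from this repository.

```python
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetImg2ImgPipeline
from diffusers.utils import load_image

# Assumed repo ids and file names; adjust for your setup.
controlnet = ControlNetModel.from_pretrained(
    "monster-labs/control_v1p_sd15_qrcode_monster", torch_dtype=torch.float16
)
img2img = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

creative_output = load_image("qr_monster_out.png")  # output of the text-to-image pass
qr_condition = load_image("qr_condition.png")       # the original QR code condition

repaired = img2img(
    prompt="a gothic stone sculpture, intricate details, dramatic lighting",
    image=creative_output,
    control_image=qr_condition,
    strength=0.3,                       # low denoising strength keeps most of the image
    controlnet_conditioning_scale=2.0,  # high guidance scale pushes toward readability
    num_inference_steps=30,
).images[0]
repaired.save("qr_monster_repaired.png")  # raise `strength` gradually until this scans
```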
+## Example outputs
+
+Here are some examples of creative and readable QR codes generated using this model:
+
+![A gothic sculpture in shape of a QR code, reading "test"](images/skull_test.png)
+![City ruins with a building facade in shape of a QR code, reading "sd is cool"](images/architecture.png)
+
+Feel free to experiment with prompts, parameters, and the Image-to-Image feature to achieve the desired QR code output. Good luck and have fun!
config.json
ADDED
@@ -0,0 +1,42 @@
+{
+  "_class_name": "ControlNetModel",
+  "_diffusers_version": "0.17.0.dev0",
+  "act_fn": "silu",
+  "attention_head_dim": 8,
+  "block_out_channels": [
+    320,
+    640,
+    1280,
+    1280
+  ],
+  "class_embed_type": null,
+  "conditioning_embedding_out_channels": [
+    16,
+    32,
+    96,
+    256
+  ],
+  "controlnet_conditioning_channel_order": "rgb",
+  "cross_attention_dim": 768,
+  "down_block_types": [
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "CrossAttnDownBlock2D",
+    "DownBlock2D"
+  ],
+  "downsample_padding": 1,
+  "flip_sin_to_cos": true,
+  "freq_shift": 0,
+  "global_pool_conditions": false,
+  "in_channels": 4,
+  "layers_per_block": 2,
+  "mid_block_scale_factor": 1,
+  "norm_eps": 1e-05,
+  "norm_num_groups": 32,
+  "num_class_embeds": null,
+  "only_cross_attention": false,
+  "projection_class_embeddings_input_dim": null,
+  "resnet_time_scale_shift": "default",
+  "upcast_attention": false,
+  "use_linear_projection": false
+}
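This is a standard diffusers `ControlNetModel` configuration compatible with SD 1.5 (the `cross_attention_dim` of 768 matches the SD 1.5 text encoder). A small sketch of loading it and checking a few fields, assuming a hypothetical local clone of this repository:

```python
from diffusers import ControlNetModel

# Hypothetical local path to a clone of this repository; from_pretrained reads
# config.json and diffusion_pytorch_model.bin from that directory.
controlnet = ControlNetModel.from_pretrained("./controlnet-qr-code-monster-v1")

print(controlnet.config.cross_attention_dim)  # 768 -> SD 1.5 text-encoder width
print(controlnet.config.block_out_channels)   # [320, 640, 1280, 1280]
print(controlnet.config.in_channels)          # 4 latent channels
```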
diffusion_pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:29bc1cbd7de2e4bee030184321724bd0c6a1724dd815eb8e593379c39badfdf2
+size 1445259705
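The checkpoint itself is stored in Git LFS, so this file in the plain git tree is only a pointer stub. One way to fetch the actual weights (about 1.4 GB, per the pointer's `size` field) programmatically, again assuming the repo id used in the earlier sketches:

```python
from huggingface_hub import hf_hub_download

# Downloads the real binary, not the LFS pointer stub.
weights_path = hf_hub_download(
    repo_id="monster-labs/control_v1p_sd15_qrcode_monster",  # assumed repo id
    filename="diffusion_pytorch_model.bin",
)
print(weights_path)  # local cache path to the downloaded checkpoint
```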
images/architecture.png
ADDED
Git LFS Details
images/monster.png
ADDED
Git LFS Details
images/skull_test.png
ADDED
Git LFS Details
images/tree.png
ADDED
Git LFS Details