Spaces: John6666/danbooru-to-ja-v2 (Running)
John6666 committed
Commit b78b1f5
Parent(s):
Super-squash branch 'main' using huggingface_hub
- .gitattributes +35 -0
- README.md +13 -0
- app.py +33 -0
- danbooru_ja_dict.json +0 -0
- danbooru_to_ja.py +78 -0
- local/danbooru_ja_dict.json +0 -0
- local/danbooru_to_ja.py +78 -0
- local/requirements.txt +1 -0
- requirements.txt +1 -0
.gitattributes
ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,13 @@
+---
+title: Danbooru Tag to Japanese Tag Converter V2
+emoji: 📦😻
+colorFrom: pink
+colorTo: yellow
+sdk: gradio
+sdk_version: 4.38.1
+app_file: app.py
+pinned: false
+license: apache-2.0
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,33 @@
+import gradio as gr
+from danbooru_to_ja import danbooru_to_ja
+
+DESCRIPTION_MD = """
+# Convert Danbooru tags to Japanese tags V2
+- [A CLI version of this tool is available here](https://huggingface.co/spaces/John6666/danbooru-to-ja-v2/tree/main/local).
+""".strip()
+
+DESCRIPTION_MD2 = """
+The dictionary was generated using the following repository:\
+[p1atdev/danbooru-ja-tag-pair-20240715](https://huggingface.co/datasets/p1atdev/danbooru-ja-tag-pair-20240715)
+""".strip()
+
+css = """"""
+
+with gr.Blocks(theme="NoCrypt/miku@>=1.2.2", css=css) as demo:
+    gr.Markdown(DESCRIPTION_MD)
+    with gr.Column():
+        input_tag = gr.Textbox(label="Input tag", placeholder="1girl, solo, sitting, ...", value="", lines=4)
+        is_append = gr.Checkbox(label="Append input tag to output", value=True)
+        run_button = gr.Button(value="Convert")
+        output_tag = gr.Textbox(label="Output tag", value="", lines=4, interactive=False, show_copy_button=True)
+        gr.Markdown(DESCRIPTION_MD2)
+
+    gr.on(
+        triggers=[input_tag.submit, run_button.click],
+        fn=danbooru_to_ja,
+        inputs=[input_tag, gr.Textbox(visible=False), gr.Textbox(visible=False), is_append],
+        outputs=[output_tag],
+    )
+
+demo.queue()
+demo.launch()
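Note: the two gr.Textbox(visible=False) components in the gr.on() wiring above supply empty strings for the input_file and output_file parameters of danbooru_to_ja, so pressing Convert amounts to the direct call sketched below (a minimal sketch, assuming danbooru_to_ja.py and danbooru_ja_dict.json from this commit are importable from the working directory; the example tag string is arbitrary).

# Programmatic equivalent of clicking Convert in the Space's UI.
from danbooru_to_ja import danbooru_to_ja

# "" for input_file and output_file matches the hidden textboxes' default values;
# is_append=True mirrors the checkbox default, so the input tags are kept in the output.
result = danbooru_to_ja("1girl, solo, sitting", "", "", True)
print(result)  # input tags followed by any Japanese tags found in the dictionary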
danbooru_ja_dict.json
ADDED
The diff for this file is too large to render.
See raw diff
danbooru_to_ja.py
ADDED
@@ -0,0 +1,78 @@
+import argparse
+import re
+from pathlib import Path
+
+
+def load_json_dict(path: str):
+    import json
+    from pathlib import Path
+    dict = {}
+    if not Path(path).exists(): return dict
+    try:
+        with open(path, encoding='utf-8') as f:
+            dict = json.load(f)
+    except Exception:
+        print(f"Failed to open dictionary file: {path}")
+        return dict
+    return dict
+
+
+danbooru_ja_dict = load_json_dict('danbooru_ja_dict.json')
+
+
+def danbooru_tags_to_jas(tags: list[str]):
+    from rapidfuzz.process import extractOne
+    from rapidfuzz.utils import default_process
+    A1111_SPECIAL_SYNTAX_RE = re.compile(r"\s*<[^>]+>\s*")
+    keys = list(danbooru_ja_dict.keys())
+    jas = []
+    for tag in tags:
+        tag = str(tag).strip()
+        if A1111_SPECIAL_SYNTAX_RE.fullmatch(tag): continue
+        s = default_process(str(tag))
+        e1 = extractOne(s, keys, processor=default_process, score_cutoff=90.0)
+        if e1:
+            jas.extend(danbooru_ja_dict[e1[0]].copy())
+    return jas
+
+
+def danbooru_to_ja(input_tag, input_file, output_file, is_append):
+    if input_file and Path(input_file).exists():
+        try:
+            with open(input_file, 'r', encoding='utf-8') as f:
+                input_tag = f.read()
+        except Exception:
+            print(f"Failed to open input file: {input_file}")
+    tags = [tag.strip() for tag in input_tag.split(",")] if input_tag else []
+    ja_tags = danbooru_tags_to_jas(tags)
+    output_tags = tags + ja_tags if is_append else ja_tags
+    output_tag = ", ".join(output_tags)
+    if output_file:
+        try:
+            with open(output_file, mode='w', encoding="utf-8") as f:
+                f.write(output_tag)
+        except Exception:
+            print(f"Failed to write output file: {output_file}")
+    else:
+        print(output_tag)
+    return output_tag
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--tags", default=None, type=str, required=False, help="Input tags.")
+    parser.add_argument("--file", default=None, type=str, required=False, help="Input tags from a text file.")
+    parser.add_argument("--out", default=None, type=str, help="Output to text file.")
+    parser.add_argument("--append", default=False, type=bool, help="Whether the output contains the input tags or not.")
+
+    args = parser.parse_args()
+    assert (args.tags, args.file) != (None, None), "Must provide --tags or --file!"
+
+    danbooru_to_ja(args.tags, args.file, args.out, args.append)
+
+
+# Usage:
+# python danbooru_to_ja.py --tags "1girl, oomuro sakurako, solo, sitting, starry sky"
+# python danbooru_to_ja.py --file inputtag.txt
+# python danbooru_to_ja.py --file inputtag.txt --append True
+# Datasets: https://huggingface.co/datasets/p1atdev/danbooru-ja-tag-pair-20240715
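The lookup in danbooru_tags_to_jas() is fuzzy: each normalized tag is compared against the dictionary keys with rapidfuzz's extractOne, and only the best match scoring at least 90 is accepted. The standalone sketch below isolates that step, using a toy two-entry dictionary in place of danbooru_ja_dict.json (the key/value pairs are illustrative assumptions; the real file maps a Danbooru tag to a list of Japanese tags).

from rapidfuzz.process import extractOne
from rapidfuzz.utils import default_process

# Toy stand-in for danbooru_ja_dict.json; values here are illustrative only.
toy_dict = {"1girl": ["女の子"], "starry sky": ["星空"]}
keys = list(toy_dict.keys())

def lookup(tag: str) -> list[str]:
    # Normalize the query the same way the script does, then keep the best
    # fuzzy match above the 90-point cutoff; extractOne returns None otherwise.
    match = extractOne(default_process(tag), keys, processor=default_process, score_cutoff=90.0)
    return toy_dict[match[0]] if match else []

print(lookup("starry  sky"))    # ['星空'] despite the doubled space
print(lookup("holding sword"))  # [] because nothing scores 90 or higher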
local/danbooru_ja_dict.json
ADDED
The diff for this file is too large to render.
See raw diff
local/danbooru_to_ja.py
ADDED
@@ -0,0 +1,78 @@
+import argparse
+import re
+from pathlib import Path
+
+
+def load_json_dict(path: str):
+    import json
+    from pathlib import Path
+    dict = {}
+    if not Path(path).exists(): return dict
+    try:
+        with open(path, encoding='utf-8') as f:
+            dict = json.load(f)
+    except Exception:
+        print(f"Failed to open dictionary file: {path}")
+        return dict
+    return dict
+
+
+danbooru_ja_dict = load_json_dict('danbooru_ja_dict.json')
+
+
+def danbooru_tags_to_jas(tags: list[str]):
+    from rapidfuzz.process import extractOne
+    from rapidfuzz.utils import default_process
+    A1111_SPECIAL_SYNTAX_RE = re.compile(r"\s*<[^>]+>\s*")
+    keys = list(danbooru_ja_dict.keys())
+    jas = []
+    for tag in tags:
+        tag = str(tag).strip()
+        if A1111_SPECIAL_SYNTAX_RE.fullmatch(tag): continue
+        s = default_process(str(tag))
+        e1 = extractOne(s, keys, processor=default_process, score_cutoff=90.0)
+        if e1:
+            jas.extend(danbooru_ja_dict[e1[0]].copy())
+    return jas
+
+
+def danbooru_to_ja(input_tag, input_file, output_file, is_append):
+    if input_file and Path(input_file).exists():
+        try:
+            with open(input_file, 'r', encoding='utf-8') as f:
+                input_tag = f.read()
+        except Exception:
+            print(f"Failed to open input file: {input_file}")
+    tags = [tag.strip() for tag in input_tag.split(",")] if input_tag else []
+    ja_tags = danbooru_tags_to_jas(tags)
+    output_tags = tags + ja_tags if is_append else ja_tags
+    output_tag = ", ".join(output_tags)
+    if output_file:
+        try:
+            with open(output_file, mode='w', encoding="utf-8") as f:
+                f.write(output_tag)
+        except Exception:
+            print(f"Failed to write output file: {output_file}")
+    else:
+        print(output_tag)
+    return output_tag
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--tags", default=None, type=str, required=False, help="Input tags.")
+    parser.add_argument("--file", default=None, type=str, required=False, help="Input tags from a text file.")
+    parser.add_argument("--out", default=None, type=str, help="Output to text file.")
+    parser.add_argument("--append", default=False, type=bool, help="Whether the output contains the input tags or not.")
+
+    args = parser.parse_args()
+    assert (args.tags, args.file) != (None, None), "Must provide --tags or --file!"
+
+    danbooru_to_ja(args.tags, args.file, args.out, args.append)
+
+
+# Usage:
+# python danbooru_to_ja.py --tags "1girl, oomuro sakurako, solo, sitting, starry sky"
+# python danbooru_to_ja.py --file inputtag.txt
+# python danbooru_to_ja.py --file inputtag.txt --append True
+# Datasets: https://huggingface.co/datasets/p1atdev/danbooru-ja-tag-pair-20240715
local/requirements.txt
ADDED
@@ -0,0 +1 @@
+rapidfuzz
requirements.txt
ADDED
@@ -0,0 +1 @@
+rapidfuzz