Upload 4 files
- README.md +13 -0
- app.py +112 -0
- convert.py +53 -0
- requirements.txt +111 -0
README.md
ADDED
@@ -0,0 +1,13 @@
---
title: Braille Detection
emoji: 🕶
colorFrom: blue
colorTo: yellow
sdk: streamlit
sdk_version: 1.17.0
app_file: app.py
pinned: true
license: mit
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,112 @@
"""
Reference
- https://docs.streamlit.io/library/api-reference/layout
- https://github.com/CodingMantras/yolov8-streamlit-detection-tracking/blob/master/app.py
- https://huggingface.co/keremberke/yolov8m-valorant-detection/tree/main
- https://docs.ultralytics.com/usage/python/
"""
import time
import PIL

import streamlit as st
import torch
from ultralyticsplus import YOLO, render_result

from convert import convert_to_braille_unicode, parse_xywh_and_class


def load_model(model_path):
    """Load a YOLO model from a local path or Hugging Face Hub id."""
    model = YOLO(model_path)
    return model


def load_image(image_path):
    """Load an image from a path or an uploaded file-like object."""
    image = PIL.Image.open(image_path)
    return image


# title
st.title("Braille Pattern Detection")

# sidebar
st.sidebar.header("Detection Config")

conf = float(st.sidebar.slider("Class Confidence", 10, 75, 15)) / 100
iou = float(st.sidebar.slider("IoU Threshold", 10, 75, 15)) / 100

model_path = "snoop2head/yolov8m-braille"

try:
    model = load_model(model_path)
    model.overrides["conf"] = conf  # NMS confidence threshold
    model.overrides["iou"] = iou  # NMS IoU threshold
    model.overrides["agnostic_nms"] = False  # class-agnostic NMS disabled
    model.overrides["max_det"] = 1000  # maximum number of detections per image
except Exception as ex:
    print(ex)
    st.write(f"Unable to load model. Check the specified path: {model_path}")

source_img = st.sidebar.file_uploader(
    "Choose an image...", type=("jpg", "jpeg", "png", "bmp", "webp")
)
col1, col2 = st.columns(2)

# left column of the page body
with col1:
    if source_img is None:
        default_image_path = "./images/alpha-numeric.jpeg"
        image = load_image(default_image_path)
        st.image(
            default_image_path, caption="Example Input Image", use_column_width=True
        )
    else:
        image = load_image(source_img)
        st.image(source_img, caption="Uploaded Image", use_column_width=True)

# right column of the page body
with col2:
    with st.spinner("Wait for it..."):
        start_time = time.time()
        try:
            with torch.no_grad():
                res = model.predict(
                    image, save=True, save_txt=True, exist_ok=True, conf=conf
                )
            boxes = res[0].boxes  # first image
            res_plotted = res[0].plot()[:, :, ::-1]  # BGR -> RGB for display

            list_boxes = parse_xywh_and_class(boxes)

            st.image(res_plotted, caption="Detected Image", use_column_width=True)
            IMAGE_DOWNLOAD_PATH = "runs/detect/predict/image0.jpg"
        except Exception as ex:
            st.write("Please upload an image of type JPG, JPEG, or PNG.")

    try:
        st.success(f"Done! Inference time: {time.time() - start_time:.2f} seconds")
        st.subheader("Detected Braille Patterns")
        for box_line in list_boxes:
            str_left_to_right = ""
            box_classes = box_line[:, -1]
            for each_class in box_classes:
                str_left_to_right += convert_to_braille_unicode(
                    model.names[int(each_class)]
                )
            st.write(str_left_to_right)
    except Exception as ex:
        st.write("Please try again with an image of type JPG, JPEG, or PNG.")

    with open(IMAGE_DOWNLOAD_PATH, "rb") as fl:
        st.download_button(
            "Download object-detected image",
            data=fl,
            file_name="image0.jpg",
            mime="image/jpeg",
        )
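For reference outside Streamlit, the same pipeline can be exercised in a few lines; a minimal sketch assuming convert.py and the app's example image are available locally (the model id and thresholds mirror the app's defaults):

from ultralyticsplus import YOLO

from convert import convert_to_braille_unicode, parse_xywh_and_class

# same model id and default slider values as app.py
model = YOLO("snoop2head/yolov8m-braille")
model.overrides["conf"] = 0.15
model.overrides["iou"] = 0.15

res = model.predict("./images/alpha-numeric.jpeg")  # the app's example image
for box_line in parse_xywh_and_class(res[0].boxes):
    # each box_line is one row of cells, already sorted left to right
    print(
        "".join(
            convert_to_braille_unicode(model.names[int(cls)])
            for cls in box_line[:, -1]
        )
    )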
convert.py
ADDED
@@ -0,0 +1,53 @@
import json
import numpy as np
import torch


def convert_to_braille_unicode(str_input: str, path: str = "./braille_map.json") -> str:
    """Map a detected class name to its braille unicode character."""
    with open(path, "r") as fl:
        data = json.load(fl)

    if str_input in data.keys():
        str_output = data[str_input]
        return str_output
    # fall back to the raw class name when no mapping exists,
    # so downstream string concatenation never receives None
    return str_input
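The braille_map.json file referenced above is not part of this commit. From the lookup code, it must be a flat string-to-string mapping from model class names to braille characters; a minimal hypothetical sketch, with entries invented for illustration:

{
  "a": "⠁",
  "b": "⠃",
  "c": "⠉"
}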

def parse_xywh_and_class(boxes: torch.Tensor) -> list:
    """
    Group detected boxes into text lines, reading top-to-bottom then left-to-right.

    Args:
        boxes (ultralytics Boxes): detection boxes with shape (num_boxes, 6),
            exposing xywh (box centers and sizes), conf (confidence values),
            and cls (class values) views.

    Returns:
        A list of numpy arrays, one per text line, each sorted by x coordinate.
    """
    # copy values from the "boxes" object into a plain numpy array
    new_boxes = np.zeros(boxes.shape)
    new_boxes[:, :4] = boxes.xywh.numpy()  # first 4 channels are xywh
    new_boxes[:, 4] = boxes.conf.numpy()  # 5th channel is confidence
    new_boxes[:, 5] = boxes.cls.numpy()  # 6th (last) channel is class

    # sort according to y coordinate
    new_boxes = new_boxes[new_boxes[:, 1].argsort()]

    # a gap in y centers larger than half the mean box height starts a new line
    y_threshold = np.mean(new_boxes[:, 3]) // 2
    boxes_diff = np.diff(new_boxes[:, 1])
    threshold_index = np.where(boxes_diff > y_threshold)[0]

    # cluster according to threshold_index
    boxes_clustered = np.split(new_boxes, threshold_index + 1)
    boxes_return = []
    for cluster in boxes_clustered:
        # sort according to x coordinate
        cluster = cluster[cluster[:, 0].argsort()]
        boxes_return.append(cluster)

    return boxes_return
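To make the line-breaking heuristic concrete, here is a small self-contained numpy run of the same logic on fabricated box centers (two rows of three cells; all coordinates are invented for illustration):

import numpy as np

# fabricated (x, y, w, h) box centers: two rows of three braille cells
xywh = np.array([
    [10, 100, 8, 12], [30, 101, 8, 12], [50, 99, 8, 12],   # top row
    [10, 150, 8, 12], [30, 151, 8, 12], [50, 149, 8, 12],  # bottom row
], dtype=float)

xywh = xywh[xywh[:, 1].argsort()]        # sort by y center
y_threshold = np.mean(xywh[:, 3]) // 2   # half the mean box height = 6.0
breaks = np.where(np.diff(xywh[:, 1]) > y_threshold)[0]  # gap of 48 > 6.0
rows = np.split(xywh, breaks + 1)        # -> two clusters of three boxes

for row in rows:
    row = row[row[:, 0].argsort()]       # left-to-right within a line
    print(row[:, 0])                     # -> [10. 30. 50.] for both rows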
requirements.txt
ADDED
@@ -0,0 +1,111 @@
absl-py==1.4.0
altair==4.2.2
antlr4-python3-runtime==4.9.3
appnope==0.1.3
asttokens==2.2.1
attrs==22.2.0
backcall==0.2.0
backports.zoneinfo==0.2.1
blinker==1.5
cachetools==5.3.0
certifi==2022.12.7
charset-normalizer==3.1.0
click==8.0.4
contourpy==1.0.7
cycler==0.11.0
decorator==5.1.1
entrypoints==0.4
executing==1.2.0
filelock==3.10.4
fire==0.5.0
fonttools==4.39.2
gitdb==4.0.10
GitPython==3.1.31
google-auth==2.16.3
google-auth-oauthlib==0.4.6
grpcio==1.51.3
huggingface-hub==0.13.3
hydra-core==1.3.2
idna==3.4
importlib-metadata==6.1.0
importlib-resources==5.12.0
ipython==8.11.0
jedi==0.18.2
Jinja2==3.1.2
jsonschema==4.17.3
kiwisolver==1.4.4
Markdown==3.4.3
markdown-it-py==2.2.0
MarkupSafe==2.1.2
matplotlib==3.7.1
matplotlib-inline==0.1.6
mdurl==0.1.2
mpmath==1.3.0
networkx==3.0
numpy==1.24.2
oauthlib==3.2.2
omegaconf==2.3.0
opencv-python==4.6.0.66
packaging==23.0
pandas==1.5.3
parso==0.8.3
pexpect==4.8.0
pickleshare==0.7.5
Pillow==9.4.0
pkgutil_resolve_name==1.3.10
prompt-toolkit==3.0.38
protobuf==3.20.3
psutil==5.9.4
ptyprocess==0.7.0
pure-eval==0.2.2
pyarrow==11.0.0
pyasn1==0.4.8
pyasn1-modules==0.2.8
pybboxes==0.1.6
pydeck==0.8.0
Pygments==2.14.0
Pympler==1.0.1
pyparsing==3.0.9
pyrsistent==0.19.3
python-dateutil==2.8.2
pytz==2023.2
pytz-deprecation-shim==0.1.0.post0
PyYAML==6.0
requests==2.28.2
requests-oauthlib==1.3.1
rich==13.3.2
rsa==4.9
sahi==0.11.13
scipy==1.10.1
seaborn==0.12.2
semver==2.13.0
sentry-sdk==1.17.0
shapely==2.0.1
six==1.16.0
smmap==5.0.0
stack-data==0.6.2
streamlit==1.20.0
sympy==1.11.1
tensorboard==2.12.0
tensorboard-data-server==0.7.0
tensorboard-plugin-wit==1.8.1
termcolor==2.2.0
terminaltables==3.1.10
thop==0.1.1.post2209072238
toml==0.10.2
toolz==0.12.0
torch==2.0.0
torchvision==0.15.1
tornado==6.2
tqdm==4.65.0
traitlets==5.9.0
typing_extensions==4.5.0
tzdata==2023.2
tzlocal==4.3
ultralytics==8.0.43
ultralyticsplus==0.0.28
urllib3==1.26.15
validators==0.20.0
wcwidth==0.2.6
Werkzeug==2.2.3
zipp==3.15.0