ElenaRyumina committed on
Commit d1b31ce
1 Parent(s): b0005f4
Files changed (12)
  1. .flake8 +5 -0
  2. CODE_OF_CONDUCT.md +80 -0
  3. app.css +68 -0
  4. app.py +18 -156
  5. app/__init__.py +0 -0
  6. app/app_utils.py +52 -0
  7. app/config.py +39 -0
  8. app/description.py +17 -0
  9. app/face_utils.py +33 -0
  10. app/model.py +55 -0
  11. config.toml +5 -0
  12. requirements.txt +1 -1
.flake8 ADDED
@@ -0,0 +1,5 @@
+ ; https://www.flake8rules.com/
+
+ [flake8]
+ max-line-length = 120
+ ignore = E203, E402, E741, W503
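For context, E203 and W503 are the checks most commonly disabled for compatibility with auto-formatters such as Black; a small, purely illustrative sketch of the patterns those two rules target (the function name is made up):

```python
def windowed_sum(scores, start, stop):
    # E203 (whitespace before ':') would flag the spaced slice below.
    window = scores[start : stop]
    # W503 (line break before a binary operator) targets the wrapped '+' below.
    total = (
        sum(window)
        + len(window)
    )
    return total
```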
CODE_OF_CONDUCT.md ADDED
@@ -0,0 +1,80 @@
+ # Code of Conduct
+
+ ## Our Pledge
+
+ In the interest of fostering an open and welcoming environment, we as
+ contributors and maintainers pledge to make participation in our project and
+ our community a harassment-free experience for everyone, regardless of age, body
+ size, disability, ethnicity, sex characteristics, gender identity and expression,
+ level of experience, education, socio-economic status, nationality, personal
+ appearance, race, religion, or sexual identity and orientation.
+
+ ## Our Standards
+
+ Examples of behavior that contributes to creating a positive environment
+ include:
+
+ * Using welcoming and inclusive language
+ * Being respectful of differing viewpoints and experiences
+ * Gracefully accepting constructive criticism
+ * Focusing on what is best for the community
+ * Showing empathy towards other community members
+
+ Examples of unacceptable behavior by participants include:
+
+ * The use of sexualized language or imagery and unwelcome sexual attention or
+   advances
+ * Trolling, insulting/derogatory comments, and personal or political attacks
+ * Public or private harassment
+ * Publishing others' private information, such as a physical or electronic
+   address, without explicit permission
+ * Other conduct which could reasonably be considered inappropriate in a
+   professional setting
+
+ ## Our Responsibilities
+
+ Project maintainers are responsible for clarifying the standards of acceptable
+ behavior and are expected to take appropriate and fair corrective action in
+ response to any instances of unacceptable behavior.
+
+ Project maintainers have the right and responsibility to remove, edit, or
+ reject comments, commits, code, wiki edits, issues, and other contributions
+ that are not aligned to this Code of Conduct, or to ban temporarily or
+ permanently any contributor for other behaviors that they deem inappropriate,
+ threatening, offensive, or harmful.
+
+ ## Scope
+
+ This Code of Conduct applies within all project spaces, and it also applies when
+ an individual is representing the project or its community in public spaces.
+ Examples of representing a project or community include using an official
+ project e-mail address, posting via an official social media account, or acting
+ as an appointed representative at an online or offline event. Representation of
+ a project may be further defined and clarified by project maintainers.
+
+ This Code of Conduct also applies outside the project spaces when there is a
+ reasonable belief that an individual's behavior may have a negative impact on
+ the project or its community.
+
+ ## Enforcement
+
+ Instances of abusive, harassing, or otherwise unacceptable behavior may be
+ reported by contacting the project team at <[email protected]>. All
+ complaints will be reviewed and investigated and will result in a response that
+ is deemed necessary and appropriate to the circumstances. The project team is
+ obligated to maintain confidentiality with regard to the reporter of an incident.
+ Further details of specific enforcement policies may be posted separately.
+
+ Project maintainers who do not follow or enforce the Code of Conduct in good
+ faith may face temporary or permanent repercussions as determined by other
+ members of the project's leadership.
+
+ ## Attribution
+
+ This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
+ available at <https://www.contributor-covenant.org/version/1/4/code-of-conduct.html>
+
+ [homepage]: https://www.contributor-covenant.org
+
+ For answers to common questions about this code of conduct, see
+ <https://www.contributor-covenant.org/faq>
app.css ADDED
@@ -0,0 +1,68 @@
+ div.app-flex-container {
+     display: flex;
+     align-items: left;
+ }
+
+ div.app-flex-container > img {
+     margin-right: 6px;
+ }
+
+ div.dl1 div.upload-container {
+     height: 350px;
+     max-height: 350px;
+ }
+
+ div.dl2 {
+     max-height: 200px;
+ }
+
+ div.dl2 img {
+     max-height: 200px;
+ }
+
+ .submit {
+     display: inline-block;
+     padding: 10px 20px;
+     font-size: 16px;
+     font-weight: bold;
+     text-align: center;
+     text-decoration: none;
+     cursor: pointer;
+     border: var(--button-border-width) solid var(--button-primary-border-color);
+     background: var(--button-primary-background-fill);
+     color: var(--button-primary-text-color);
+     border-radius: 8px;
+     transition: all 0.3s ease;
+ }
+
+ .submit[disabled] {
+     cursor: not-allowed;
+     opacity: 0.6;
+ }
+
+ .submit:hover:not([disabled]) {
+     border-color: var(--button-primary-border-color-hover);
+     background: var(--button-primary-background-fill-hover);
+     color: var(--button-primary-text-color-hover);
+ }
+
+ .clear {
+     display: inline-block;
+     padding: 10px 20px;
+     font-size: 16px;
+     font-weight: bold;
+     text-align: center;
+     text-decoration: none;
+     cursor: pointer;
+     border-radius: 8px;
+     transition: all 0.3s ease;
+ }
+
+ .clear[disabled] {
+     cursor: not-allowed;
+     opacity: 0.6;
+ }
+
+ .submit:active:not([disabled]), .clear:active:not([disabled]) {
+     transform: scale(0.98);
+ }
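For reference, these selectors match the elem_classes assigned to Gradio components in app.py, and the stylesheet is attached through the css argument of gr.Blocks; a condensed sketch of that wiring (it mirrors, but simplifies, the layout defined later in this commit):

```python
import gradio as gr

# app.css is passed to Blocks, so the .submit/.clear/.dl* rules apply to any
# component that declares a matching elem_classes value.
with gr.Blocks(css="app.css") as demo:
    with gr.Column(elem_classes="dl1"):
        input_image = gr.Image(type="pil")
    submit = gr.Button(value="Submit", elem_classes="submit")
    clear_btn = gr.Button(value="Clear", elem_classes="clear")
    output_image = gr.Image(elem_classes="dl2")

demo.launch()
```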
app.py CHANGED
@@ -1,110 +1,17 @@
- import torch
- from PIL import Image
- from torchvision import transforms
- import mediapipe as mp
- import numpy as np
- import math
- import requests
+ """
+ File: app.py
+ Author: Elena Ryumina and Dmitry Ryumin
+ Description: Main application file for Facial_Expression_Recognition.
+              The file defines the Gradio interface, sets up the main blocks,
+              and includes event handlers for various components.
+ License: MIT License
+ """

  import gradio as gr

- model_url = "https://huggingface.co/ElenaRyumina/face_emotion_recognition/resolve/main/FER_static_ResNet50_AffectNet.pth"
- model_path = "FER_static_ResNet50_AffectNet.pth"
-
- response = requests.get(model_url, stream=True)
- with open(model_path, "wb") as file:
-     for chunk in response.iter_content(chunk_size=8192):
-         file.write(chunk)
-
- pth_model = torch.jit.load(model_path)
- pth_model.eval()
-
- DICT_EMO = {
-     0: "Neutral",
-     1: "Happiness",
-     2: "Sadness",
-     3: "Surprise",
-     4: "Fear",
-     5: "Disgust",
-     6: "Anger",
- }
-
- mp_face_mesh = mp.solutions.face_mesh
-
-
- def pth_processing(fp):
-     class PreprocessInput(torch.nn.Module):
-         def __init__(self):
-             super(PreprocessInput, self).__init__()
-
-         def forward(self, x):
-             x = x.to(torch.float32)
-             x = torch.flip(x, dims=(0,))
-             x[0, :, :] -= 91.4953
-             x[1, :, :] -= 103.8827
-             x[2, :, :] -= 131.0912
-             return x
-
-     def get_img_torch(img):
-         ttransform = transforms.Compose([transforms.PILToTensor(), PreprocessInput()])
-         img = img.resize((224, 224), Image.Resampling.NEAREST)
-         img = ttransform(img)
-         img = torch.unsqueeze(img, 0)
-         return img
-
-     return get_img_torch(fp)
-
-
- def norm_coordinates(normalized_x, normalized_y, image_width, image_height):
-     x_px = min(math.floor(normalized_x * image_width), image_width - 1)
-     y_px = min(math.floor(normalized_y * image_height), image_height - 1)
-
-     return x_px, y_px
-
-
- def get_box(fl, w, h):
-     idx_to_coors = {}
-     for idx, landmark in enumerate(fl.landmark):
-         landmark_px = norm_coordinates(landmark.x, landmark.y, w, h)
-
-         if landmark_px:
-             idx_to_coors[idx] = landmark_px
-
-     x_min = np.min(np.asarray(list(idx_to_coors.values()))[:, 0])
-     y_min = np.min(np.asarray(list(idx_to_coors.values()))[:, 1])
-     endX = np.max(np.asarray(list(idx_to_coors.values()))[:, 0])
-     endY = np.max(np.asarray(list(idx_to_coors.values()))[:, 1])
-
-     (startX, startY) = (max(0, x_min), max(0, y_min))
-     (endX, endY) = (min(w - 1, endX), min(h - 1, endY))
-
-     return startX, startY, endX, endY
-
-
- def predict(inp):
-     inp = np.array(inp)
-     h, w = inp.shape[:2]
-
-     with mp_face_mesh.FaceMesh(
-         max_num_faces=1,
-         refine_landmarks=False,
-         min_detection_confidence=0.5,
-         min_tracking_confidence=0.5,
-     ) as face_mesh:
-         results = face_mesh.process(inp)
-         if results.multi_face_landmarks:
-             for fl in results.multi_face_landmarks:
-                 startX, startY, endX, endY = get_box(fl, w, h)
-                 cur_face = inp[startY:endY, startX:endX]
-                 cur_face_n = pth_processing(Image.fromarray(cur_face))
-                 prediction = (
-                     torch.nn.functional.softmax(pth_model(cur_face_n), dim=1)
-                     .detach()
-                     .numpy()[0]
-                 )
-                 confidences = {DICT_EMO[i]: float(prediction[i]) for i in range(7)}
-
-     return cur_face, confidences
+ # Importing necessary components for the Gradio app
+ from app.description import DESCRIPTION
+ from app.app_utils import preprocess_and_predict


  def clear():
@@ -115,60 +22,19 @@ def clear():
      )


- style = """
- div.dl1 div.upload-container {
-     height: 350px;
-     max-height: 350px;
- }
-
- div.dl2 {
-     max-height: 200px;
- }
+ with gr.Blocks(css="app.css") as demo:
+     gr.Markdown(value=DESCRIPTION)

- div.dl2 img {
-     max-height: 200px;
- }
-
- .submit {
-     display: inline-block;
-     padding: 10px 20px;
-     font-size: 16px;
-     font-weight: bold;
-     text-align: center;
-     text-decoration: none;
-     cursor: pointer;
-     border: var(--button-border-width) solid var(--button-primary-border-color);
-     background: var(--button-primary-background-fill);
-     color: var(--button-primary-text-color);
-     border-radius: 8px;
-     transition: all 0.3s ease;
- }
-
- .submit[disabled] {
-     cursor: not-allowed;
-     opacity: 0.6;
- }
-
- .submit:hover:not([disabled]) {
-     border-color: var(--button-primary-border-color-hover);
-     background: var(--button-primary-background-fill-hover);
-     color: var(--button-primary-text-color-hover);
- }
-
- .submit:active:not([disabled]) {
-     transform: scale(0.98);
- }
- """
-
- with gr.Blocks(css=style) as demo:
      with gr.Row():
          with gr.Column(scale=2, elem_classes="dl1"):
              input_image = gr.Image(type="pil")
              with gr.Row():
+                 clear_btn = gr.Button(
+                     value="Clear", interactive=True, scale=1, elem_classes="clear"
+                 )
                  submit = gr.Button(
                      value="Submit", interactive=True, scale=1, elem_classes="submit"
                  )
-             clear_btn = gr.Button(value="Clear", interactive=True, scale=1)
          with gr.Column(scale=1, elem_classes="dl4"):
              output_image = gr.Image(scale=1, elem_classes="dl2")
              output_label = gr.Label(num_top_classes=3, scale=1, elem_classes="dl3")
@@ -186,7 +52,7 @@ with gr.Blocks(css=style) as demo:
      )

      submit.click(
-         fn=predict,
+         fn=preprocess_and_predict,
          inputs=[input_image],
          outputs=[output_image, output_label],
          queue=True,
@@ -194,11 +60,7 @@ with gr.Blocks(css=style) as demo:
      clear_btn.click(
          fn=clear,
          inputs=[],
-         outputs=[
-             input_image,
-             output_image,
-             output_label,
-         ],
+         outputs=[input_image, output_image, output_label],
          queue=True,
      )

app/__init__.py ADDED
File without changes
app/app_utils.py ADDED
@@ -0,0 +1,52 @@
+ """
+ File: app_utils.py
+ Author: Elena Ryumina and Dmitry Ryumin
+ Description: This module contains utility functions for the facial expression recognition application.
+ License: MIT License
+ """
+
+ import torch
+ import numpy as np
+ import mediapipe as mp
+ from PIL import Image
+
+ # Importing necessary components for the Gradio app
+ from app.model import pth_model, pth_processing
+ from app.face_utils import get_box
+ from app.config import DICT_EMO
+
+
+ mp_face_mesh = mp.solutions.face_mesh
+
+
+ def preprocess_and_predict(inp):
+     inp = np.array(inp)
+
+     if inp is None:
+         return None, None
+
+     try:
+         h, w = inp.shape[:2]
+     except Exception:
+         return None, None
+
+     with mp_face_mesh.FaceMesh(
+         max_num_faces=1,
+         refine_landmarks=False,
+         min_detection_confidence=0.5,
+         min_tracking_confidence=0.5,
+     ) as face_mesh:
+         results = face_mesh.process(inp)
+         if results.multi_face_landmarks:
+             for fl in results.multi_face_landmarks:
+                 startX, startY, endX, endY = get_box(fl, w, h)
+                 cur_face = inp[startY:endY, startX:endX]
+                 cur_face_n = pth_processing(Image.fromarray(cur_face))
+                 prediction = (
+                     torch.nn.functional.softmax(pth_model(cur_face_n), dim=1)
+                     .detach()
+                     .numpy()[0]
+                 )
+                 confidences = {DICT_EMO[i]: float(prediction[i]) for i in range(7)}
+
+     return cur_face, confidences
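For a quick check outside the Gradio UI, preprocess_and_predict can be called directly with a PIL image; a minimal sketch, assuming the model download in app/model.py has succeeded and that test_face.jpg (an illustrative file name) contains one visible face:

```python
from PIL import Image

from app.app_utils import preprocess_and_predict

img = Image.open("test_face.jpg").convert("RGB")  # hypothetical input image
face_crop, confidences = preprocess_and_predict(img)

# face_crop is the cropped face as a NumPy array; confidences maps the seven
# emotion labels from DICT_EMO to softmax probabilities, e.g. "Happiness": 0.91.
print(confidences)
```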
app/config.py ADDED
@@ -0,0 +1,39 @@
+ """
+ File: config.py
+ Author: Elena Ryumina and Dmitry Ryumin
+ Description: Configuration file.
+ License: MIT License
+ """
+
+ import toml
+ from typing import Dict
+ from types import SimpleNamespace
+
+
+ def flatten_dict(prefix: str, d: Dict) -> Dict:
+     result = {}
+
+     for k, v in d.items():
+         if isinstance(v, dict):
+             result.update(flatten_dict(f"{prefix}{k}_", v))
+         else:
+             result[f"{prefix}{k}"] = v
+
+     return result
+
+
+ config = toml.load("config.toml")
+
+ config_data = flatten_dict("", config)
+
+ config_data = SimpleNamespace(**config_data)
+
+ DICT_EMO = {
+     0: "Neutral",
+     1: "Happiness",
+     2: "Sadness",
+     3: "Surprise",
+     4: "Fear",
+     5: "Disgust",
+     6: "Anger",
+ }
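Given the config.toml added in this commit, the flattening step turns the [model] table into prefixed keys, which is why app/model.py can read config_data.model_url and config_data.model_path; a small illustration (run from the repository root so that config.toml resolves when app.config is imported):

```python
from types import SimpleNamespace

from app.config import flatten_dict

raw = {
    "APP_VERSION": "0.1.0",
    "model": {
        "url": "https://huggingface.co/ElenaRyumina/face_emotion_recognition/resolve/main/FER_static_ResNet50_AffectNet.pth",
        "path": "FER_static_ResNet50_AffectNet.pth",
    },
}

flat = flatten_dict("", raw)
# {'APP_VERSION': '0.1.0', 'model_url': 'https://...', 'model_path': 'FER_static_ResNet50_AffectNet.pth'}

config_data = SimpleNamespace(**flat)
print(config_data.model_url)
```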
app/description.py ADDED
@@ -0,0 +1,17 @@
+ """
+ File: description.py
+ Author: Elena Ryumina and Dmitry Ryumin
+ Description: Project description for the Gradio app.
+ License: MIT License
+ """
+
+ # Importing necessary components for the Gradio app
+ from app.config import config_data
+
+ DESCRIPTION = f"""\
+ # Facial_Expression_Recognition
+ <div class="app-flex-container">
+     <img src="https://img.shields.io/badge/version-v{config_data.APP_VERSION}-rc0" alt="Version">
+     <a href="https://visitorbadge.io/status?path=https%3A%2F%2Fgit.lnyan.com%2Fspaces%2FElenaRyumina%2FFacial_Expression_Recognition"><img src="https://api.visitorbadge.io/api/combined?path=https%3A%2F%2Fgit.lnyan.com%2Fspaces%2FElenaRyumina%2FFacial_Expression_Recognition&countColor=%23263759&style=flat" /></a>
+ </div>
+ """
app/face_utils.py ADDED
@@ -0,0 +1,33 @@
+ """
+ File: face_utils.py
+ Author: Elena Ryumina and Dmitry Ryumin
+ Description: This module contains utility functions related to facial landmarks and image processing.
+ License: MIT License
+ """
+
+ import numpy as np
+ import math
+
+
+ def norm_coordinates(normalized_x, normalized_y, image_width, image_height):
+     x_px = min(math.floor(normalized_x * image_width), image_width - 1)
+     y_px = min(math.floor(normalized_y * image_height), image_height - 1)
+     return x_px, y_px
+
+
+ def get_box(fl, w, h):
+     idx_to_coors = {}
+     for idx, landmark in enumerate(fl.landmark):
+         landmark_px = norm_coordinates(landmark.x, landmark.y, w, h)
+         if landmark_px:
+             idx_to_coors[idx] = landmark_px
+
+     x_min = np.min(np.asarray(list(idx_to_coors.values()))[:, 0])
+     y_min = np.min(np.asarray(list(idx_to_coors.values()))[:, 1])
+     endX = np.max(np.asarray(list(idx_to_coors.values()))[:, 0])
+     endY = np.max(np.asarray(list(idx_to_coors.values()))[:, 1])
+
+     (startX, startY) = (max(0, x_min), max(0, y_min))
+     (endX, endY) = (min(w - 1, endX), min(h - 1, endY))
+
+     return startX, startY, endX, endY
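As a concrete illustration of the coordinate conversion: MediaPipe returns landmarks normalized to [0, 1], and norm_coordinates scales them to pixel indices while clamping to the image bounds (the numbers below are chosen for illustration):

```python
from app.face_utils import norm_coordinates

# A landmark at 50% of the width and 25% of the height of a 640x480 frame:
print(norm_coordinates(0.5, 0.25, 640, 480))  # -> (320, 120)

# Coordinates at the far edge are clamped to the last valid pixel index:
print(norm_coordinates(1.0, 1.0, 640, 480))   # -> (639, 479)
```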
app/model.py ADDED
@@ -0,0 +1,55 @@
+ """
+ File: model.py
+ Author: Elena Ryumina and Dmitry Ryumin
+ Description: This module provides functions for loading and processing a pre-trained deep learning model
+              for facial expression recognition.
+ License: MIT License
+ """
+
+ import torch
+ import requests
+ from PIL import Image
+ from torchvision import transforms
+
+ # Importing necessary components for the Gradio app
+ from app.config import config_data
+
+
+ def load_model(model_url, model_path):
+     try:
+         # Download the model
+         with requests.get(model_url, stream=True) as response:
+             with open(model_path, "wb") as file:
+                 for chunk in response.iter_content(chunk_size=8192):
+                     file.write(chunk)
+         return torch.jit.load(model_path).eval()
+     except Exception as e:
+         print(f"Error loading model: {e}")
+         return None
+
+
+ # Load the model
+ pth_model = load_model(config_data.model_url, config_data.model_path)
+
+
+ def pth_processing(fp):
+     class PreprocessInput(torch.nn.Module):
+         def __init__(self):
+             super(PreprocessInput, self).__init__()
+
+         def forward(self, x):
+             x = x.to(torch.float32)
+             x = torch.flip(x, dims=(0,))
+             x[0, :, :] -= 91.4953
+             x[1, :, :] -= 103.8827
+             x[2, :, :] -= 131.0912
+             return x
+
+     def get_img_torch(img, target_size=(224, 224)):
+         transform = transforms.Compose([transforms.PILToTensor(), PreprocessInput()])
+         img = img.resize(target_size, Image.Resampling.NEAREST)
+         img = transform(img)
+         img = torch.unsqueeze(img, 0)
+         return img
+
+     return get_img_torch(fp)
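Outside the app, the downloaded TorchScript model can be exercised directly; a minimal sketch, assuming load_model succeeded and that face_crop.jpg (an illustrative file name) is an already-cropped face image:

```python
import torch
from PIL import Image

from app.model import pth_model, pth_processing

face = Image.open("face_crop.jpg").convert("RGB")  # hypothetical pre-cropped face

with torch.no_grad():
    logits = pth_model(pth_processing(face))  # tensor of shape (1, 7)
    probs = torch.nn.functional.softmax(logits, dim=1)

print(probs.squeeze(0).tolist())  # probabilities in DICT_EMO order
```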
config.toml ADDED
@@ -0,0 +1,5 @@
+ APP_VERSION = "0.1.0"
+
+ [model]
+ url = "https://huggingface.co/ElenaRyumina/face_emotion_recognition/resolve/main/FER_static_ResNet50_AffectNet.pth"
+ path = "FER_static_ResNet50_AffectNet.pth"
requirements.txt CHANGED
@@ -3,5 +3,5 @@ requests==2.31.0
  torch==2.1.2
  torchaudio==2.1.2
  torchvision==0.16.2
- mediapipe==0.9.0.1
+ mediapipe==0.10.9
  pillow==10.2.0