Hobson cetinca committed on
Commit
5892472
0 Parent(s):

Duplicate from TangibleAI/mathtext-fastapi


Co-authored-by: Cetin Cakir <[email protected]>

.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,163 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ .idea/
+
+ *history_sentiment*
+ *history_text2int*
.gitlab-ci.yml ADDED
@@ -0,0 +1,16 @@
+ # Official Python language image.
+ test_py38:
+   image: python:3.8
+   before_script:
+     - python -v
+     - pip install -r requirements.txt
+   script:
+     - pytest --verbose
+
+ test_py39:
+   image: python:3.9
+   before_script:
+     - python -v
+     - pip install -r requirements.txt
+   script:
+     - pytest --verbose
CHANGELOG.md ADDED
@@ -0,0 +1,18 @@
+
+ ## [0.0.12](https://gitlab.com/tangibleai/community/mathtext-fastapi/-/tags/0.0.12)
+
+ Improve NLU capabilities
+ - Improved handling for integers (1), floats (1.0), and text numbers (one)
+ - Integrates fuzzy keyword matching for 'easier', 'exit', 'harder', 'hint', 'next', 'stop'
+ - Integrates intent classification for user messages
+ - Improved conversation management system
+ - Created a data-driven quiz prototype
+
+
+ ## [0.0.0](https://gitlab.com/tangibleai/community/mathtext-fastapi/-/tags/0.0.0)
+
+ Initial release
+ - Basic text to integer NLU evaluation of user responses
+ - Basic sentiment analysis evaluation of user responses
+ - Prototype conversation manager using finite state machines
+ - Support for logging of user message data
Dockerfile ADDED
@@ -0,0 +1,22 @@
+ # https://huggingface.co/docs/hub/spaces-sdks-docker-first-demo
+
+ FROM python:3.9
+
+ WORKDIR /code
+
+ COPY ./requirements.txt /code/requirements.txt
+
+ RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+ RUN useradd -m -u 1000 user
+
+ USER user
+
+ ENV HOME=/home/user \
+     PATH=/home/user/.local/bin:$PATH
+
+ WORKDIR $HOME/app
+
+ COPY --chown=user . $HOME/app
+
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
README.md ADDED
@@ -0,0 +1,12 @@
+ ---
+ title: Mathtext Fastapi
+ emoji: 🐨
+ colorFrom: blue
+ colorTo: red
+ sdk: docker
+ pinned: false
+ license: agpl-3.0
+ duplicated_from: TangibleAI/mathtext-fastapi
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,294 @@
1
+ """FastAPI endpoint
2
+ To run locally use 'uvicorn app:app --host localhost --port 7860'
3
+ """
4
+ import ast
5
+ import scripts.quiz.generators as generators
6
+ import scripts.quiz.hints as hints
7
+ import scripts.quiz.questions as questions
8
+ import scripts.quiz.utils as utils
9
+ import sentry_sdk
10
+
11
+ from fastapi import FastAPI, Request
12
+ from fastapi.responses import JSONResponse
13
+ from fastapi.staticfiles import StaticFiles
14
+ from fastapi.templating import Jinja2Templates
15
+ from mathtext.sentiment import sentiment
16
+ from mathtext.text2int import text2int
17
+ from pydantic import BaseModel
18
+
19
+ from mathtext_fastapi.logging import prepare_message_data_for_logging
20
+ from mathtext_fastapi.conversation_manager import manage_conversation_response
21
+ from mathtext_fastapi.nlu import evaluate_message_with_nlu
22
+ from mathtext_fastapi.nlu import run_intent_classification
23
+
24
+ sentry_sdk.init(
25
+ dsn="https://[email protected]/4504896688881664",
26
+
27
+ # Set traces_sample_rate to 1.0 to capture 100%
28
+ # of transactions for performance monitoring.
29
+ # We recommend adjusting this value in production,
30
+ traces_sample_rate=0.20,
31
+ )
32
+
33
+ app = FastAPI()
34
+
35
+ app.mount("/static", StaticFiles(directory="static"), name="static")
36
+
37
+ templates = Jinja2Templates(directory="templates")
38
+
39
+
40
+ class Text(BaseModel):
41
+ content: str = ""
42
+
43
+
44
+ @app.get("/")
45
+ def home(request: Request):
46
+ return templates.TemplateResponse("home.html", {"request": request})
47
+
48
+
49
+ @app.get("/sentry-debug")
50
+ async def trigger_error():
51
+ division_by_zero = 1 / 0
52
+
53
+
54
+ @app.post("/hello")
55
+ def hello(content: Text = None):
56
+ content = {"message": f"Hello {content.content}!"}
57
+ return JSONResponse(content=content)
58
+
59
+
60
+ @app.post("/sentiment-analysis")
61
+ def sentiment_analysis_ep(content: Text = None):
62
+ ml_response = sentiment(content.content)
63
+ content = {"message": ml_response}
64
+ return JSONResponse(content=content)
65
+
66
+
67
+ @app.post("/text2int")
68
+ def text2int_ep(content: Text = None):
69
+ ml_response = text2int(content.content)
70
+ content = {"message": ml_response}
71
+ return JSONResponse(content=content)
72
+
73
+
74
+ @app.post("/manager")
75
+ async def programmatic_message_manager(request: Request):
76
+ """
77
+ Calls conversation management function to determine the next state
78
+
79
+ Input
80
+ request.body: dict - message data for the most recent user response
81
+ {
82
+ "author_id": "+47897891",
83
+ "contact_uuid": "j43hk26-2hjl-43jk-hnk2-k4ljl46j0ds09",
84
+ "author_type": "OWNER",
85
+ "message_body": "a test message",
86
+ "message_direction": "inbound",
87
+ "message_id": "ABJAK64jlk3-agjkl2QHFAFH",
88
+ "message_inserted_at": "2022-07-05T04:00:34.03352Z",
89
+ "message_updated_at": "2023-02-14T03:54:19.342950Z",
90
+ }
91
+
92
+ Output
93
+ context: dict - the information for the current state
94
+ {
95
+ "user": "47897891",
96
+ "state": "welcome-message-state",
97
+ "bot_message": "Welcome to Rori!",
98
+ "user_message": "",
99
+ "type": "ask"
100
+ }
101
+ """
102
+ data_dict = await request.json()
103
+ context = manage_conversation_response(data_dict)
104
+ return JSONResponse(context)
105
+
106
+
107
+ @app.post("/intent-classification")
108
+ def intent_classification_ep(content: Text = None):
109
+ ml_response = run_intent_classification(content.content)
110
+ content = {"message": ml_response}
111
+ return JSONResponse(content=content)
112
+
113
+
114
+ @app.post("/nlu")
115
+ async def evaluate_user_message_with_nlu_api(request: Request):
116
+ """ Calls nlu evaluation and returns the nlu_response
117
+
118
+ Input
119
+ - request.body: json - message data for the most recent user response
120
+
121
+ Output
122
+ - int_data_dict or sent_data_dict: dict - the type of NLU run and result
123
+ {'type':'integer', 'data': '8', 'confidence': 0}
124
+ {'type':'sentiment', 'data': 'negative', 'confidence': 0.99}
125
+ """
126
+ data_dict = await request.json()
127
+ message_data = data_dict.get('message_data', '')
128
+ nlu_response = evaluate_message_with_nlu(message_data)
129
+ return JSONResponse(content=nlu_response)
130
+
131
+
132
+ @app.post("/question")
133
+ async def ask_math_question(request: Request):
134
+ """Generate a question and return it as response along with question data
135
+
136
+ Input
137
+ request.body: json - amount of correct and incorrect answers in the account
138
+ {
139
+ 'number_correct': 0,
140
+ 'number_incorrect': 0,
141
+ 'level': 'easy'
142
+ }
143
+
144
+ Output
145
+ context: dict - the information for the current state
146
+ {
147
+ 'text': 'What is 1+2?',
148
+ 'question_numbers': [1,2,3], #3 numbers - current number, ordinal number, times
149
+ 'right_answer': 3,
150
+ 'number_correct': 0,
151
+ 'number_incorrect': 0,
152
+ 'hints_used': 0
153
+ }
154
+ """
155
+ data_dict = await request.json()
156
+ message_data = ast.literal_eval(data_dict.get('message_data', '').get('message_body', ''))
157
+ right_answers = message_data['number_correct']
158
+ wrong_answers = message_data['number_incorrect']
159
+ level = message_data['level']
160
+
161
+ return JSONResponse(generators.start_interactive_math(right_answers, wrong_answers, level))
162
+
163
+
164
+ @app.post("/hint")
165
+ async def get_hint(request: Request):
166
+ """Generate a hint and return it as response along with hint data
167
+
168
+ Input
169
+ request.body:
170
+ {
171
+ 'question_numbers': [1,2,3], #3 numbers - current number, ordinal number, times
172
+ 'right_answer': 3,
173
+ 'number_correct': 0,
174
+ 'number_incorrect': 0,
175
+ 'level': 'easy',
176
+ 'hints_used': 0
177
+ }
178
+
179
+ Output
180
+ context: dict - the information for the current state
181
+ {
182
+ 'text': 'What is 1+2?',
183
+ 'question_numbers': [1,2,3], #2 or 3 numbers
184
+ 'right_answer': 3,
185
+ 'number_correct': 0,
186
+ 'number_incorrect': 0,
187
+ 'level': 'easy',
188
+ 'hints_used': 0
189
+ }
190
+ """
191
+ data_dict = await request.json()
192
+ message_data = ast.literal_eval(data_dict.get('message_data', '').get('message_body', ''))
193
+ question_numbers = message_data['question_numbers']
194
+ right_answer = message_data['right_answer']
195
+ number_correct = message_data['number_correct']
196
+ number_incorrect = message_data['number_incorrect']
197
+ level = message_data['level']
198
+ hints_used = message_data['hints_used']
199
+
200
+ return JSONResponse(hints.generate_hint(question_numbers, right_answer, number_correct, number_incorrect, level, hints_used))
201
+
202
+
203
+ @app.post("/generate_question")
204
+ async def generate_question(request: Request):
205
+ """Generate a bare question and return it as response
206
+
207
+ Input
208
+ request.body: json - level
209
+ {
210
+ 'level': 'easy'
211
+ }
212
+
213
+ Output
214
+ context: dict - the information for the current state
215
+ {
216
+ "question": "Let's count up by 2s. What number is next if we start from 10?
217
+ 6 8 10 ..."
218
+ }
219
+ """
220
+ data_dict = await request.json()
221
+ message_data = ast.literal_eval(data_dict.get('message_data', '').get('message_body', ''))
222
+ level = message_data['level']
223
+
224
+ return JSONResponse(questions.generate_question_data(level)['question'])
225
+
226
+
227
+ @app.post("/numbers_by_level")
228
+ async def get_numbers_by_level(request: Request):
229
+ """Generate three numbers and return them as response
230
+
231
+ Input
232
+ request.body: json - level
233
+ {
234
+ 'level': 'easy'
235
+ }
236
+
237
+ Output
238
+ context: dict - three generated numbers for specified level
239
+ {
240
+ "current_number": 10,
241
+ "ordinal_number": 2,
242
+ "times": 1
243
+ }
244
+ """
245
+ data_dict = await request.json()
246
+ message_data = ast.literal_eval(data_dict.get('message_data', '').get('message_body', ''))
247
+ level = message_data['level']
248
+ return JSONResponse(questions.generate_numbers_by_level(level))
249
+
250
+
251
+ @app.post("/number_sequence")
252
+ async def get_number_sequence(request: Request):
253
+ """Generate a number sequence
254
+
255
+ Input
256
+ request.body: json - level
257
+ {
258
+ "current_number": 10,
259
+ "ordinal_number": 2,
260
+ "times": 1
261
+ }
262
+
263
+ Output
264
+ one of the following strings (the numbers will differ):
265
+ ... 1 2 3
266
+ 1 2 3 ...
267
+ """
268
+ data_dict = await request.json()
269
+ message_data = ast.literal_eval(data_dict.get('message_data', '').get('message_body', ''))
270
+ cur_num = message_data['current_number']
271
+ ord_num = message_data['ordinal_number']
272
+ times = message_data['times']
273
+ return JSONResponse(questions.generate_number_sequence(cur_num, ord_num, times))
274
+
275
+
276
+ @app.post("/level")
277
+ async def get_next_level(request: Request):
278
+ """Depending on current level and desire to level up/down return next level
279
+
280
+ Input
281
+ request.body: json - level
282
+ {
283
+ "current_level": "easy",
284
+ "level_up": True
285
+ }
286
+
287
+ Output
288
+ Literal - "easy", "medium" or "hard"
289
+ """
290
+ data_dict = await request.json()
291
+ message_data = ast.literal_eval(data_dict.get('message_data', '').get('message_body', ''))
292
+ cur_level = message_data['current_level']
293
+ level_up = message_data['level_up']
294
+ return JSONResponse(utils.get_next_level(cur_level, level_up))
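A minimal client sketch for exercising these endpoints locally. This is an illustrative snippet, not part of the committed files: it assumes the server is running on port 7860 as in the module docstring and Dockerfile CMD, and the payload shapes follow the `Text` model and the `/manager` and `/nlu` docstrings above.

    # Hypothetical smoke test against a locally running instance of app.py
    import requests

    BASE_URL = "http://localhost:7860"  # assumption: local uvicorn dev server

    # /text2int expects {"content": "<utterance>"} per the Text pydantic model
    resp = requests.post(f"{BASE_URL}/text2int", json={"content": "thirty-two hundred"})
    print(resp.json())  # e.g. {"message": 3200.0} per the text2int evaluation data below

    # /nlu expects a "message_data" object shaped like the /manager docstring example
    payload = {
        "message_data": {
            "author_id": "+47897891",
            "contact_uuid": "j43hk26-2hjl-43jk-hnk2-k4ljl46j0ds09",
            "author_type": "OWNER",
            "message_body": "one hundred",
            "message_direction": "inbound",
            "message_id": "ABJAK64jlk3-agjkl2QHFAFH",
            "message_inserted_at": "2022-07-05T04:00:34.03352Z",
            "message_updated_at": "2023-02-14T03:54:19.342950Z",
        }
    }
    resp = requests.post(f"{BASE_URL}/nlu", json=payload)
    print(resp.json())  # e.g. {"type": "integer", "data": 100, "confidence": 0} per the docstring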
docs/transitions_math_quiz_example.ipynb ADDED
@@ -0,0 +1,368 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 19,
6
+ "id": "d3da0422",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "import random\n",
11
+ "\n",
12
+ "from transitions import State, Machine"
13
+ ]
14
+ },
15
+ {
16
+ "cell_type": "code",
17
+ "execution_count": 20,
18
+ "id": "07cfb740",
19
+ "metadata": {},
20
+ "outputs": [],
21
+ "source": [
22
+ "class MathQuizFSM(object):\n",
23
+ " states = [\n",
24
+ " 'quiz_start', \n",
25
+ " 'quiz_question', \n",
26
+ " 'quiz_end'\n",
27
+ " ]\n",
28
+ "\n",
29
+ " transitions = [\n",
30
+ " ['ask_second_question', 'quiz_start', 'quiz_question'],\n",
31
+ " ['ask_next_question', 'quiz_question', 'quiz_question'],\n",
32
+ " ['exit', 'quiz_start', 'quiz_end'],\n",
33
+ " ['exit', 'quiz_question', 'quiz_end'],\n",
34
+ " ]\n",
35
+ " \n",
36
+ " \n",
37
+ " def __init__(self):\n",
38
+ " # Instantiate the FSM\n",
39
+ " self.machine = Machine(model=self, states=MathQuizFSM.states, transitions=MathQuizFSM.transitions,initial='quiz_start')\n",
40
+ "\n",
41
+ " # Instantiate variables necessary for tracking activity\n",
42
+ " self.question_nums = [2, 3]\n",
43
+ " self.correct_answer = 5\n",
44
+ " self.student_answer = 0\n",
45
+ " self.is_correct_answer = False\n",
46
+ " self.response_text = \"What is 2 + 3?\"\n",
47
+ "\n",
48
+ " # Define transitions\n",
49
+ "# self.machine.add_transition('ask_second_question', 'quiz_start', 'quiz_question')\n",
50
+ "# self.machine.add_transition('ask_next_question', 'quiz_question', 'quiz_question')\n",
51
+ "# self.machine.add_transition('exit', 'quiz_start', 'quiz_end')\n",
52
+ "# self.machine.add_transition('exit', 'quiz_question', 'quiz_end')\n",
53
+ "\n",
54
+ " # Define functions to run on transitions\n",
55
+ " self.machine.on_enter_quiz_question('generate_math_problem')\n",
56
+ " self.machine.on_exit_quiz_question('validate_answer')\n",
57
+ "\n",
58
+ " def validate_answer(self):\n",
59
+ " if self.student_answer == 'exit':\n",
60
+ " self.machine.set_state('quiz_end')\n",
61
+ " return [\"Come back any time!\"]\n",
62
+ " elif self.correct_answer == self.student_answer:\n",
63
+ " self.machine.set_state('quiz_question')\n",
64
+ " self.generate_math_problem()\n",
65
+ " return ['Great job!', self.response_text]\n",
66
+ " else:\n",
67
+ " return [\"That's not quite right. Try again.\",self.response_text]\n",
68
+ " \n",
69
+ " def generate_math_problem(self):\n",
70
+ " self.question_nums = random.sample(range(1,100),2)\n",
71
+ " self.response_text = f\"What is {self.question_nums[0]} + {self.question_nums[1]}\"\n",
72
+ " self.correct_answer = self.question_nums[0] + self.question_nums[1]\n"
73
+ ]
74
+ },
75
+ {
76
+ "cell_type": "code",
77
+ "execution_count": 21,
78
+ "id": "ebdf92ae",
79
+ "metadata": {},
80
+ "outputs": [],
81
+ "source": [
82
+ "test = MathQuizFSM()"
83
+ ]
84
+ },
85
+ {
86
+ "cell_type": "code",
87
+ "execution_count": 22,
88
+ "id": "92024fcc",
89
+ "metadata": {},
90
+ "outputs": [
91
+ {
92
+ "data": {
93
+ "text/plain": [
94
+ "'quiz_start'"
95
+ ]
96
+ },
97
+ "execution_count": 22,
98
+ "metadata": {},
99
+ "output_type": "execute_result"
100
+ }
101
+ ],
102
+ "source": [
103
+ "# Set as `quiz_start` due to the initial setting in Line 10\n",
104
+ "test.state"
105
+ ]
106
+ },
107
+ {
108
+ "cell_type": "code",
109
+ "execution_count": 23,
110
+ "id": "fd1ba433",
111
+ "metadata": {},
112
+ "outputs": [
113
+ {
114
+ "data": {
115
+ "text/plain": [
116
+ "['quiz_start', 'quiz_question', 'quiz_end']"
117
+ ]
118
+ },
119
+ "execution_count": 23,
120
+ "metadata": {},
121
+ "output_type": "execute_result"
122
+ }
123
+ ],
124
+ "source": [
125
+ "# Available states for the quiz module\n",
126
+ "test.states"
127
+ ]
128
+ },
129
+ {
130
+ "cell_type": "code",
131
+ "execution_count": 24,
132
+ "id": "bb190089",
133
+ "metadata": {},
134
+ "outputs": [
135
+ {
136
+ "name": "stdout",
137
+ "output_type": "stream",
138
+ "text": [
139
+ "What is 2 + 3?\n",
140
+ "Initial Correct Answer: 5\n",
141
+ "Initial Student Answer: 0\n"
142
+ ]
143
+ }
144
+ ],
145
+ "source": [
146
+ "# When the FSM is created, it comes with a default question/answer pair loaded\n",
147
+ "print(test.response_text)\n",
148
+ "print(f\"Initial Correct Answer: {test.correct_answer}\")\n",
149
+ "print(f\"Initial Student Answer: {test.student_answer}\")"
150
+ ]
151
+ },
152
+ {
153
+ "cell_type": "code",
154
+ "execution_count": 25,
155
+ "id": "3de7c4e0",
156
+ "metadata": {},
157
+ "outputs": [
158
+ {
159
+ "data": {
160
+ "text/plain": [
161
+ "[\"That's not quite right. Try again.\", 'What is 2 + 3?']"
162
+ ]
163
+ },
164
+ "execution_count": 25,
165
+ "metadata": {},
166
+ "output_type": "execute_result"
167
+ }
168
+ ],
169
+ "source": [
170
+ "# Calling the validation fails because the answer is wrong. The state remains the same.\n",
171
+ "test.validate_answer()"
172
+ ]
173
+ },
174
+ {
175
+ "cell_type": "code",
176
+ "execution_count": 26,
177
+ "id": "4935b470",
178
+ "metadata": {},
179
+ "outputs": [],
180
+ "source": [
181
+ "# The student tries again\n",
182
+ "test.student_answer = 5"
183
+ ]
184
+ },
185
+ {
186
+ "cell_type": "code",
187
+ "execution_count": 27,
188
+ "id": "03722434",
189
+ "metadata": {},
190
+ "outputs": [
191
+ {
192
+ "data": {
193
+ "text/plain": [
194
+ "['Great job!', 'What is 58 + 89']"
195
+ ]
196
+ },
197
+ "execution_count": 27,
198
+ "metadata": {},
199
+ "output_type": "execute_result"
200
+ }
201
+ ],
202
+ "source": [
203
+ "# Since the student answered correctly, MathQuizFSM generates a new math problem\n",
204
+ "test.validate_answer()"
205
+ ]
206
+ },
207
+ {
208
+ "cell_type": "code",
209
+ "execution_count": 28,
210
+ "id": "d98a4d5b",
211
+ "metadata": {},
212
+ "outputs": [
213
+ {
214
+ "data": {
215
+ "text/plain": [
216
+ "'quiz_question'"
217
+ ]
218
+ },
219
+ "execution_count": 28,
220
+ "metadata": {},
221
+ "output_type": "execute_result"
222
+ }
223
+ ],
224
+ "source": [
225
+ "# It will repeatedly re-activate the same state\n",
226
+ "test.state"
227
+ ]
228
+ },
229
+ {
230
+ "cell_type": "code",
231
+ "execution_count": 29,
232
+ "id": "76c8a5b2",
233
+ "metadata": {},
234
+ "outputs": [
235
+ {
236
+ "data": {
237
+ "text/plain": [
238
+ "[\"That's not quite right. Try again.\", 'What is 58 + 89']"
239
+ ]
240
+ },
241
+ "execution_count": 29,
242
+ "metadata": {},
243
+ "output_type": "execute_result"
244
+ }
245
+ ],
246
+ "source": [
247
+ "test.validate_answer()"
248
+ ]
249
+ },
250
+ {
251
+ "cell_type": "code",
252
+ "execution_count": 30,
253
+ "id": "ec0a7e6a",
254
+ "metadata": {},
255
+ "outputs": [],
256
+ "source": [
257
+ "test.student_answer = 128"
258
+ ]
259
+ },
260
+ {
261
+ "cell_type": "code",
262
+ "execution_count": 31,
263
+ "id": "a093ff27",
264
+ "metadata": {},
265
+ "outputs": [
266
+ {
267
+ "data": {
268
+ "text/plain": [
269
+ "[\"That's not quite right. Try again.\", 'What is 58 + 89']"
270
+ ]
271
+ },
272
+ "execution_count": 31,
273
+ "metadata": {},
274
+ "output_type": "execute_result"
275
+ }
276
+ ],
277
+ "source": [
278
+ "test.validate_answer()"
279
+ ]
280
+ },
281
+ {
282
+ "cell_type": "code",
283
+ "execution_count": 32,
284
+ "id": "f992d34d",
285
+ "metadata": {},
286
+ "outputs": [],
287
+ "source": [
288
+ "test.student_answer = 'exit'"
289
+ ]
290
+ },
291
+ {
292
+ "cell_type": "code",
293
+ "execution_count": 33,
294
+ "id": "28800a2b",
295
+ "metadata": {},
296
+ "outputs": [
297
+ {
298
+ "data": {
299
+ "text/plain": [
300
+ "['Come back any time!']"
301
+ ]
302
+ },
303
+ "execution_count": 33,
304
+ "metadata": {},
305
+ "output_type": "execute_result"
306
+ }
307
+ ],
308
+ "source": [
309
+ "test.validate_answer()"
310
+ ]
311
+ },
312
+ {
313
+ "cell_type": "code",
314
+ "execution_count": 34,
315
+ "id": "360ef774",
316
+ "metadata": {},
317
+ "outputs": [
318
+ {
319
+ "data": {
320
+ "text/plain": [
321
+ "'quiz_end'"
322
+ ]
323
+ },
324
+ "execution_count": 34,
325
+ "metadata": {},
326
+ "output_type": "execute_result"
327
+ }
328
+ ],
329
+ "source": [
330
+ "test.state"
331
+ ]
332
+ },
333
+ {
334
+ "cell_type": "code",
335
+ "execution_count": null,
336
+ "id": "3f0392ae",
337
+ "metadata": {},
338
+ "outputs": [],
339
+ "source": []
340
+ }
341
+ ],
342
+ "metadata": {
343
+ "kernelspec": {
344
+ "display_name": "base",
345
+ "language": "python",
346
+ "name": "python3"
347
+ },
348
+ "language_info": {
349
+ "codemirror_mode": {
350
+ "name": "ipython",
351
+ "version": 3
352
+ },
353
+ "file_extension": ".py",
354
+ "mimetype": "text/x-python",
355
+ "name": "python",
356
+ "nbconvert_exporter": "python",
357
+ "pygments_lexer": "ipython3",
358
+ "version": "3.9.7"
359
+ },
360
+ "vscode": {
361
+ "interpreter": {
362
+ "hash": "32cf04bfac80a5e1e74e86fca42ae7f3079b15fa61041a60732bc19e88699268"
363
+ }
364
+ }
365
+ },
366
+ "nbformat": 4,
367
+ "nbformat_minor": 5
368
+ }
mathtext_fastapi/__init__.py ADDED
File without changes
mathtext_fastapi/conversation_manager.py ADDED
@@ -0,0 +1,437 @@
1
+ import base64
2
+ import dill
3
+ import os
4
+ import json
5
+ import jsonpickle
6
+ import pickle
7
+ import random
8
+ import requests
9
+
10
+ from dotenv import load_dotenv
11
+ from mathtext_fastapi.nlu import evaluate_message_with_nlu
12
+ from mathtext_fastapi.math_quiz_fsm import MathQuizFSM
13
+ from mathtext_fastapi.math_subtraction_fsm import MathSubtractionFSM
14
+ from supabase import create_client
15
+ from transitions import Machine
16
+
17
+ from scripts.quiz.generators import start_interactive_math
18
+ from scripts.quiz.hints import generate_hint
19
+
20
+ load_dotenv()
21
+
22
+ SUPA = create_client(
23
+ os.environ.get('SUPABASE_URL'),
24
+ os.environ.get('SUPABASE_KEY')
25
+ )
26
+
27
+
28
+ def create_text_message(message_text, whatsapp_id):
29
+ """ Fills a template with input values to send a text message to Whatsapp
30
+
31
+ Inputs
32
+ - message_text: str - the content that the message should display
33
+ - whatsapp_id: str - the message recipient's phone number
34
+
35
+ Outputs
36
+ - message_data: dict - a preformatted template filled with inputs
37
+ """
38
+ message_data = {
39
+ "preview_url": False,
40
+ "recipient_type": "individual",
41
+ "to": whatsapp_id,
42
+ "type": "text",
43
+ "text": {
44
+ "body": message_text
45
+ }
46
+ }
47
+ return message_data
48
+
49
+
50
+ def create_button_objects(button_options):
51
+ """ Creates a list of button objects using the input values
52
+ Input
53
+ - button_options: list - a list of text to be displayed in buttons
54
+
55
+ Output
56
+ - button_arr: list - preformatted button objects filled with the inputs
57
+
58
+ NOTE: Not fully implemented and tested
59
+ """
60
+ button_arr = []
61
+ for option in button_options:
62
+ button_choice = {
63
+ "type": "reply",
64
+ "reply": {
65
+ "id": "inquiry-yes",
66
+ "title": option['text']
67
+ }
68
+ }
69
+ button_arr.append(button_choice)
70
+ return button_arr
71
+
72
+
73
+ def create_interactive_message(message_text, button_options, whatsapp_id):
74
+ """ Fills a template to create a button message for Whatsapp
75
+
76
+ * NOTE: Not fully implemented and tested
77
+ * NOTE/TODO: It is possible to create other kinds of messages
78
+ with the 'interactive message' template
79
+ * Documentation:
80
+ https://whatsapp.turn.io/docs/api/messages#interactive-messages
81
+
82
+ Inputs
83
+ - message_text: str - the content that the message should display
84
+ - button_options: list - what each button option should display
85
+ - whatsapp_id: str - the message recipient's phone number
86
+ """
87
+ button_arr = create_button_objects(button_options)
88
+
89
+ data = {
90
+ "to": whatsapp_id,
91
+ "type": "interactive",
92
+ "interactive": {
93
+ "type": "button",
94
+ # "header": { },
95
+ "body": {
96
+ "text": message_text
97
+ },
98
+ # "footer": { },
99
+ "action": {
100
+ "buttons": button_arr
101
+ }
102
+ }
103
+ }
104
+ return data
105
+
106
+
107
+ def pickle_and_encode_state_machine(state_machine):
108
+ dump = pickle.dumps(state_machine)
109
+ dump_encoded = base64.b64encode(dump).decode('utf-8')
110
+ return dump_encoded
111
+
112
+
113
+ def manage_math_quiz_fsm(user_message, contact_uuid, type):
114
+ fsm_check = SUPA.table('state_machines').select("*").eq(
115
+ "contact_uuid",
116
+ contact_uuid
117
+ ).execute()
118
+
119
+ # This doesn't allow for when one FSM is present and the other is empty
120
+ """
121
+ 1
122
+ data=[] count=None
123
+
124
+ 2
125
+ data=[{'id': 29, 'contact_uuid': 'j43hk26-2hjl-43jk-hnk2-k4ljl46j0ds09', 'addition3': None, 'subtraction': None, 'addition':
126
+
127
+ - but the problem is there is no subtraction, yet it's assuming there's a subtraction
128
+
129
+
130
+ Cases
131
+ - make a completely new record
132
+ - update an existing record with an existing FSM
133
+ - update an existing record without an existing FSM
134
+ """
135
+
136
+
137
+ # Make a completely new entry
138
+ if fsm_check.data == []:
139
+ if type == 'addition':
140
+ math_quiz_state_machine = MathQuizFSM()
141
+ else:
142
+ math_quiz_state_machine = MathSubtractionFSM()
143
+ messages = [math_quiz_state_machine.response_text]
144
+ dump_encoded = pickle_and_encode_state_machine(math_quiz_state_machine)
145
+
146
+ SUPA.table('state_machines').insert({
147
+ 'contact_uuid': contact_uuid,
148
+ f'{type}': dump_encoded
149
+ }).execute()
150
+ # Update an existing record with a new state machine
151
+ elif not fsm_check.data[0][type]:
152
+ if type == 'addition':
153
+ math_quiz_state_machine = MathQuizFSM()
154
+ else:
155
+ math_quiz_state_machine = MathSubtractionFSM()
156
+ messages = [math_quiz_state_machine.response_text]
157
+ dump_encoded = pickle_and_encode_state_machine(math_quiz_state_machine)
158
+
159
+ SUPA.table('state_machines').update({
160
+ f'{type}': dump_encoded
161
+ }).eq(
162
+ "contact_uuid", contact_uuid
163
+ ).execute()
164
+ # Update an existing record with an existing state machine
165
+ elif fsm_check.data[0][type]:
166
+ undump_encoded = base64.b64decode(
167
+ fsm_check.data[0][type].encode('utf-8')
168
+ )
169
+ math_quiz_state_machine = pickle.loads(undump_encoded)
170
+
171
+ math_quiz_state_machine.student_answer = user_message
172
+ math_quiz_state_machine.correct_answer = str(math_quiz_state_machine.correct_answer)
173
+ messages = math_quiz_state_machine.validate_answer()
174
+ dump_encoded = pickle_and_encode_state_machine(math_quiz_state_machine)
175
+ SUPA.table('state_machines').update({
176
+ f'{type}': dump_encoded
177
+ }).eq(
178
+ "contact_uuid", contact_uuid
179
+ ).execute()
180
+ return messages
181
+
182
+
183
+ def use_quiz_module_approach(user_message, context_data):
184
+ print("USER MESSAGE")
185
+ print(user_message)
186
+ print("=======================")
187
+ if user_message == 'add':
188
+ context_result = start_interactive_math()
189
+ message_package = {
190
+ 'messages': [
191
+ "Great, let's do some addition",
192
+ "First, we'll start with single digits.",
193
+ "Type your response as a number. For example, for '1 + 1', you'd write 2."
194
+ ],
195
+ 'input_prompt': context_result['text'],
196
+ 'state': "addition-question-sequence"
197
+ }
198
+
199
+ elif user_message == context_data.get('right_answer'):
200
+ context_result = start_interactive_math(
201
+ context_data['number_correct'],
202
+ context_data['number_incorrect'],
203
+ context_data['level']
204
+ )
205
+ message_package = {
206
+ 'messages': [
207
+ "That's right, great!",
208
+ ],
209
+ 'input_prompt': context_result['text'],
210
+ 'state': "addition-question-sequence"
211
+ }
212
+ else:
213
+ context_result = generate_hint(
214
+ context_data['question_numbers'],
215
+ context_data['right_answer'],
216
+ context_data['number_correct'],
217
+ context_data['number_incorrect'],
218
+ context_data['level'],
219
+ context_data['hints_used']
220
+ )
221
+ message_package = {
222
+ 'messages': [
223
+ context_result['text'],
224
+ ],
225
+ 'input_prompt': context_data['text'],
226
+ 'state': "addition-question-sequence"
227
+ }
228
+ return message_package, context_result
229
+
230
+
231
+ def return_next_conversational_state(context_data, user_message, contact_uuid):
232
+ """ Evaluates the conversation's current state to determine the next state
233
+
234
+ Input
235
+ - context_data: dict - data about the conversation's current state
236
+ - user_message: str - the message the user sent in response to the state
237
+
238
+ Output
239
+ - message_package: dict - a series of messages and prompt to send
240
+ """
241
+ if context_data['user_message'] == '' and \
242
+ context_data['state'] == 'start-conversation':
243
+ message_package = {
244
+ 'messages': [],
245
+ 'input_prompt': "Welcome to our math practice. What would you like to try? Type add or subtract.",
246
+ 'state': "welcome-sequence"
247
+ }
248
+ elif context_data['state'] == 'addition-question-sequence' or \
249
+ user_message == 'add':
250
+
251
+ # Used in FSM
252
+ # messages = manage_math_quiz_fsm(user_message, contact_uuid)
253
+
254
+ # message_package, context_result = use_quiz_module_approach(user_message, context_data)
255
+ messages = manage_math_quiz_fsm(user_message, contact_uuid, 'addition')
256
+
257
+ if user_message == 'exit':
258
+ state_label = 'exit'
259
+ else:
260
+ state_label = 'addition-question-sequence'
261
+ # Used in FSM
262
+ input_prompt = messages.pop()
263
+ message_package = {
264
+ 'messages': messages,
265
+ 'input_prompt': input_prompt,
266
+ 'state': state_label
267
+ }
268
+
269
+ # Used in quiz w/ hints
270
+ # context_data = context_result
271
+ # message_package['state'] = state_label
272
+
273
+ elif context_data['state'] == 'subtraction-question-sequence' or \
274
+ user_message == 'subtract':
275
+ messages = manage_math_quiz_fsm(user_message, contact_uuid, 'subtraction')
276
+
277
+ if user_message == 'exit':
278
+ state_label = 'exit'
279
+ else:
280
+ state_label = 'subtraction-question-sequence'
281
+
282
+ input_prompt = messages.pop()
283
+
284
+ message_package = {
285
+ 'messages': messages,
286
+ 'input_prompt': input_prompt,
287
+ 'state': state_label
288
+ }
289
+
290
+ # message_package = {
291
+ # 'messages': [
292
+ # "Time for some subtraction!",
293
+ # "Type your response as a number. For example, for '1 - 1', you'd write 0."
294
+ # ],
295
+ # 'input_prompt': "Here's the first one... What's 3-1?",
296
+ # 'state': "subtract-question-sequence"
297
+ # }
298
+ elif context_data['state'] == 'exit' or user_message == 'exit':
299
+ message_package = {
300
+ 'messages': [
301
+ "Great, thanks for practicing math today. Come back any time."
302
+ ],
303
+ 'input_prompt': "",
304
+ 'state': "exit"
305
+ }
306
+ else:
307
+ message_package = {
308
+ 'messages': [
309
+ "Hmmm...sorry friend. I'm not really sure what to do."
310
+ ],
311
+ 'input_prompt': "Please type add or subtract to start a math activity.",
312
+ 'state': "reprompt-menu-options"
313
+ }
314
+ # Used in FSM
315
+ return message_package
316
+
317
+ # Used in quiz folder approach
318
+ # return context_result, message_package
319
+
320
+
321
+ def manage_conversation_response(data_json):
322
+ """ Calls functions necessary to determine message and context data to send
323
+
324
+ Input
325
+ - data_json: dict - message data from Turn.io/Whatsapp
326
+
327
+ Output
328
+ - context: dict - a record of the state at a given point in a conversation
329
+
330
+ TODOs
331
+ - implement logging of message
332
+ - test interactive messages
333
+ - review context object and re-work to use a standardized format
334
+ - review ways for more robust error handling
335
+ - need to make util functions that apply to both /nlu and /conversation_manager
336
+ """
337
+ message_data = data_json.get('message_data', '')
338
+ context_data = data_json.get('context_data', '')
339
+
340
+ whatsapp_id = message_data['author_id']
341
+ user_message = message_data['message_body']
342
+ contact_uuid = message_data['contact_uuid']
343
+
344
+ # TODO: Need to incorporate nlu_response into wormhole by checking answers against database (spreadsheet?)
345
+ nlu_response = evaluate_message_with_nlu(message_data)
346
+
347
+ if context_data['state'] == 'addition':
348
+ context_result, message_package = return_next_conversational_state(
349
+ context_data,
350
+ user_message,
351
+ contact_uuid
352
+ )
353
+ else:
354
+ message_package = return_next_conversational_state(
355
+ context_data,
356
+ user_message,
357
+ contact_uuid
358
+ )
359
+
360
+ headers = {
361
+ 'Authorization': f"Bearer {os.environ.get('TURN_AUTHENTICATION_TOKEN')}",
362
+ 'Content-Type': 'application/json'
363
+ }
364
+
365
+ # Send all messages for the current state before a user input prompt (text/button input request)
366
+ for message in message_package['messages']:
367
+ data = create_text_message(message, whatsapp_id)
368
+
369
+ print("data")
370
+ print(data)
371
+
372
+ r = requests.post(
373
+ f'https://whatsapp.turn.io/v1/messages',
374
+ data=json.dumps(data),
375
+ headers=headers
376
+ )
377
+
378
+ # Update the context object with the new state of the conversation
379
+ if context_data['state'] == 'addition':
380
+ context = {
381
+ "context": {
382
+ "user": whatsapp_id,
383
+ "state": message_package['state'],
384
+ "bot_message": message_package['input_prompt'],
385
+ "user_message": user_message,
386
+ "type": 'ask',
387
+ # Necessary for quiz folder approach
388
+ "text": context_result.get('text'),
389
+ "question_numbers": context_result.get('question_numbers'),
390
+ "right_answer": context_result.get('right_answer'),
391
+ "number_correct": context_result.get('number_correct'),
392
+ "hints_used": context_result.get('hints_used'),
393
+ }
394
+ }
395
+ else:
396
+ context = {
397
+ "context": {
398
+ "user": whatsapp_id,
399
+ "state": message_package['state'],
400
+ "bot_message": message_package['input_prompt'],
401
+ "user_message": user_message,
402
+ "type": 'ask',
403
+ }
404
+ }
405
+
406
+ return context
407
+
408
+ # data = {
409
+ # "to": whatsapp_id,
410
+ # "type": "interactive",
411
+ # "interactive": {
412
+ # "type": "button",
413
+ # # "header": { },
414
+ # "body": {
415
+ # "text": "Did I answer your question?"
416
+ # },
417
+ # # "footer": { },
418
+ # "action": {
419
+ # "buttons": [
420
+ # {
421
+ # "type": "reply",
422
+ # "reply": {
423
+ # "id": "inquiry-yes",
424
+ # "title": "Yes"
425
+ # }
426
+ # },
427
+ # {
428
+ # "type": "reply",
429
+ # "reply": {
430
+ # "id": "inquiry-no",
431
+ # "title": "No"
432
+ # }
433
+ # }
434
+ # ]
435
+ # }
436
+ # }
437
+ # }
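The pickle/base64 helpers in this module are how a student's quiz state survives between webhook calls: the FSM is pickled, base64-encoded, stored as text in the `state_machines` table, then decoded and unpickled on the next message. A minimal sketch of that round trip, with the Supabase reads/writes omitted (this snippet is illustrative, not part of the commit):

    # Sketch of the FSM persistence round trip used by manage_math_quiz_fsm
    import base64
    import pickle

    from mathtext_fastapi.math_quiz_fsm import MathQuizFSM

    # Serialize: pickle the state machine, then base64-encode for text storage
    fsm = MathQuizFSM()
    dump_encoded = base64.b64encode(pickle.dumps(fsm)).decode('utf-8')

    # Deserialize: decode and unpickle to resume where the student left off
    restored = pickle.loads(base64.b64decode(dump_encoded.encode('utf-8')))
    restored.student_answer = '5'
    restored.correct_answer = str(restored.correct_answer)  # answers are compared as strings here
    print(restored.validate_answer())  # e.g. ['Great job!', 'What is 58 + 89']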
mathtext_fastapi/data/intent_classification_model.joblib ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea4954368c3b95673167ce347f2962b5508c4af295b6af58b6c11b3c1075b42e
+ size 127903
mathtext_fastapi/data/labeled_data.csv ADDED
@@ -0,0 +1,144 @@
1
+ Utterance,Label
2
+ skip this,skip
3
+ this is stupid,skip
4
+ this is stupid,harder
5
+ this is stupid,feedback
6
+ I'm done,exit
7
+ quit,exit
8
+ I don't know,hint
9
+ help,hint
10
+ can I do something else?,main menu
11
+ what's going on,rapport
12
+ what's going on,main menu
13
+ tell me a joke,rapport
14
+ tell me a joke,main menu
15
+ Sorry I don't understand,do not know
16
+ Ten thousand,number
17
+ 1.234,number
18
+ "10,000",number
19
+ "123, 456",numbers
20
+ "11, 12, 13",numbers
21
+ "100, 200, 300",numbers
22
+ "100, 200",numbers
23
+ Stop for a minute,wait
24
+ Bye bye,exit
25
+ Good night,exit
26
+ Am done,exit
27
+ Yes,yes
28
+ Help,help
29
+ Idiot,harder
30
+ Stop,exit
31
+ I don't get it,hint
32
+ Math,main menu
33
+ Math,math topic
34
+ Tomorrow let do math,wait
35
+ Later,wait
36
+ Pls i will continue pls,skip
37
+ Rori tell me now,help
38
+ harder,skip
39
+ Stop for now i wont to go to School,exit
40
+ Next,next
41
+ Okay,okay
42
+ Great,affirmation
43
+ Give me for example,example
44
+ No I want to learn algebraic expressions,algebra
45
+ Hi rori,greeting
46
+ *help*,help
47
+ *Next*,next
48
+ Okay nice,okay
49
+ I don't know it,hint
50
+ Nex,next
51
+ I need a help,hint
52
+ Please can I ask your any math questions?,faq
53
+ The answer is 1,answer
54
+ The answer is 1,number
55
+ But 0.8 is also same as . 8 so I was actually right,I'm right
56
+ What is the number system?,faq
57
+ Ok thanks,thanks
58
+ I'm going to school now,exit
59
+ Let's move to another topic,main menu
60
+ "Ummanni saba
61
+ Kebena bara kana galmi keenya inni guddaan bilisummaa qofa #Gabrummaan_ammaan booda_gaha namni hundi bakka jiru irraa kutatee ka,ee jira obboleewwan goototni keenya jiran haqa Kebenaaf jechaa jiru Guraandhala 29 booda walabummaa keenya labsina Dhugaa qabna Ni injifanna *** . Naannoo giddu galeessa Itoophiyaatti #Kebenaan aanaa addaati Kun murtoo ummata Kebenaa hundaati",spam
62
+ Yes it,yes
63
+ U type fast,too fast
64
+ I mean your typing is fast,too fast
65
+ Why do u type so fast,too fast
66
+ Ur typing is fast,too fast
67
+ Can we go to a real work,harder
68
+ I know all this,harder
69
+ Answer this,preamble
70
+ Am tired,exit
71
+ This is not what I asked for,main menu
72
+ Bye,exit
73
+ 😱😱😂😂😂😡😰😰😰😒,spam
74
+ Gbxbxbcbcbbcbchcbchc,spam
75
+ I want to solve math,math topic
76
+ Pleas let start with the fraction,fractions topic
77
+ Okey,okay
78
+ i need substraction,subtraction topic
79
+ Can you please stop with me,exit
80
+ Another one,next
81
+ Harder or easy,main menu
82
+ Hard or easier,main menu
83
+ Jump topic,menu
84
+ Got it,okay
85
+ I didn't understand,don't know
86
+ Don't understand,don't know
87
+ Excuse me pls,hint
88
+ Let stop for today,exit
89
+ Help and stop asking me stupid questions,
90
+ Ykay,okay
91
+ Not interested in solving this,menu
92
+ Stpo,exit
93
+ Hiiiiiii,greeting
94
+ Hi rori,greeting
95
+ I've done this things before,harder
96
+ Which number my phone number,
97
+ Unit,main menu
98
+ No ide,don't know
99
+ No ide,hint
100
+ No idea,don't know
101
+ 🙈🤩😇🙏,spam
102
+ Thank u,thanks
103
+ Do you know programming,faq
104
+ Delete my number,unsubscribe
105
+ See u,exit
106
+ Can I go for break ??,wait
107
+ I wanna fuck,profanity
108
+ Enough of this nw,exit
109
+ Can we move to equations,equations
110
+ Do you know you are an idiot,insult
111
+ 3 digit number,number
112
+ 3 digit number,answer
113
+ Three digit number,confident answer
114
+ Three digit number,number
115
+ Good evening Rori,greeting
116
+ 89 Next,answer
117
+ 89 Next,number
118
+ 3 digit number,answer
119
+ Three digit number,answer
120
+ This is too simple,harder
121
+ Am not a kid,harder
122
+ Hey Miss Roribcan you ask me some question from Secondary 2,greeting
123
+ Hey Miss Roribcan you ask me some question from Secondary 2,faq
124
+ Hey Miss Roribcan you ask me some question from Secondary 2,main menu
125
+ don't know,hint
126
+ don't know,easier
127
+ 𝑴𝒂𝒕𝒉,math
128
+ Rori can you help me to gat value,
129
+ I called but u are not picking up,
130
+ 0.3 answer,answer
131
+ Sorry rori was101,answer
132
+ Y is it 6,answer
133
+ Y is it 6,number
134
+ 0.3 answer,number
135
+ Why 0.5,more explanation
136
+ Why 0.5,number
137
+ 6\nNext,Next
138
+ How is the answer is 11,more explanation
139
+ How comes we have 11,more explanation
140
+ Yes 6,answer
141
+ Yes 6,number
142
+ 6\nNext,number
143
+ How is the answer is 11,number
144
+ How comes we have 11,number
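This CSV is the training set that create_intent_classification_model() in intent_classification.py reads. A small, illustrative sketch (not part of the commit) for inspecting the label distribution before retraining; the path assumes the repo layout shown here:

    # Inspect intent label counts in the labeled training data
    import pandas as pd

    df = pd.read_csv("mathtext_fastapi/data/labeled_data.csv")
    print(df["Label"].value_counts().head(10))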
mathtext_fastapi/data/text2int_results.csv ADDED
@@ -0,0 +1,105 @@
1
+ input,output,text2int,score
2
+ notanumber,32202.0,32202.0,True
3
+ this is not a number,32202.0,32202.0,True
4
+ fourteen,14.0,14.0,True
5
+ forteen,14.0,14.0,True
6
+ one thousand four hundred ninety two,1492.0,1492.0,True
7
+ one thousand ninety two,1092.0,1092.0,True
8
+ Fourteen Hundred Ninety-Two,1492.0,1492.0,True
9
+ Fourteen Hundred,1400.0,1400.0,True
10
+ Ninety nine,99.0,99.0,True
11
+ fifteen thousand five hundred-sixty,15560.0,15560.0,True
12
+ three hundred fifty,350.0,350.0,True
13
+ one nine eight five,1985.0,1985.0,True
14
+ nineteen eighty-five,1985.0,1605.0,False
15
+ oh one,1.0,1.0,True
16
+ six oh 1,601.0,601.0,True
17
+ sex,6.0,6.0,True
18
+ six,6.0,6.0,True
19
+ eight oh,80.0,8.0,False
20
+ eighty,80.0,80.0,True
21
+ ate,8.0,1.0,False
22
+ double eight,88.0,8.0,False
23
+ eight three seven five three O nine,8375309.0,8375329.0,False
24
+ eight three seven five three oh nine,8375309.0,8375309.0,True
25
+ eight three seven five three zero nine,8375309.0,8375309.0,True
26
+ eight three seven five three oh ni-ee-ine,8375309.0,837530619.0,False
27
+ two eight,28.0,16.0,False
28
+ seven oh eleven,7011.0,77.0,False
29
+ seven elevens,77.0,77.0,True
30
+ seven eleven,711.0,77.0,False
31
+ ninety nine oh five,9905.0,149.0,False
32
+ seven 0 seven 0 seven 0 seven,7070707.0,7070707.0,True
33
+ 123 hundred,123000.0,223.0,False
34
+ 5 o 5,505.0,525.0,False
35
+ 15 o 5,1505.0,22.0,False
36
+ 15-o 5,1505.0,22.0,False
37
+ 15 o-5,1505.0,22.0,False
38
+ 911-thousand,911000.0,911000.0,True
39
+ twenty-two twenty-two,2222.0,44.0,False
40
+ twenty-two twenty-twos,484.0,44.0,False
41
+ four eighty four,484.0,404.0,False
42
+ four eighties,320.0,72.0,False
43
+ four eighties and nine nineties,1130.0,243.0,False
44
+ ninety nine hundred and seventy seven,9977.0,276.0,False
45
+ seven thousands,7000.0,7000.0,True
46
+ 2 hundreds,200.0,200.0,True
47
+ 99 thousands and one,99001.0,99001.0,True
48
+ "forty-five thousand, seven hundred and nine",45709.0,1161.0,False
49
+ eighty eight hundred eighty,8880.0,268.0,False
50
+ a hundred hundred,10000.0,100.0,False
51
+ a hundred thousand,100000.0,100.0,False
52
+ a hundred million,100000000.0,100.0,False
53
+ nineteen ninety nine,1999.0,1809.0,False
54
+ forteen twenty seven,1427.0,307.0,False
55
+ seventeen-thousand and seventy two,17072.0,17072.0,True
56
+ two hundred and nine,209.0,209.0,True
57
+ two thousand ten,2010.0,2010.0,True
58
+ two thousand and ten,2010.0,2010.0,True
59
+ twelve million,12000000.0,12000000.0,True
60
+ 8 billion,8000000000.0,8000000000.0,True
61
+ twenty ten,2010.0,2010.0,True
62
+ thirty-two hundred,3200.0,3200.0,True
63
+ nine,9.0,9.0,True
64
+ forty two,42.0,42.0,True
65
+ 1 2 three,123.0,123.0,True
66
+ fourtean,14.0,14.0,True
67
+ one tousand four hundred ninty two,1492.0,1492.0,True
68
+ Furteen Hundrd Ninety-Too,1492.0,1492.0,True
69
+ forrteen,14.0,14.0,True
70
+ sevnteen-thosand and seventy two,17072.0,17072.0,True
71
+ ninety nine hundred ad seventy seven,9977.0,90.0,False
72
+ seven thusands,7000.0,7000.0,True
73
+ 2 hunreds,200.0,200.0,True
74
+ 99 tousands and one,99001.0,99001.0,True
75
+ eighty ate hundred eighty,8880.0,261.0,False
76
+ fourteen Hundred,1400.0,1400.0,True
77
+ 8 Bilion,8000000000.0,8000000.0,False
78
+ one million three thousand one,1003001.0,1003001.0,True
79
+ four million nine thousand seven,4009007.0,4009007.0,True
80
+ two million five hundred thousand,2500000.0,2001500.0,False
81
+ two tousand ten,2010.0,2010.0,True
82
+ two thousand teen,2010.0,2007.0,False
83
+ tvelve milion,12000000.0,12000000.0,True
84
+ tventy ten,2010.0,2010.0,True
85
+ tirty-twoo hunred,3200.0,3200.0,True
86
+ sevn thoosands,7000.0,7000.0,True
87
+ five,5.0,5.0,True
88
+ ten,10.0,10.0,True
89
+ one two three and ten,12310.0,51.0,False
90
+ ONE MILLion three hunded and fiv,1000305.0,1000305.0,True
91
+ "50,500 and six",50506.0,50506.0,True
92
+ one_million_and_five,1000005.0,1000005.0,True
93
+ 2.0,2.0,2.0,True
94
+ 4.5,4.5,4.5,True
95
+ 12345.001,12345.001,12345.001,True
96
+ 7..0,7.0,7.0,True
97
+ 0.06,0.06,0.06,True
98
+ "0,25",0.25,25.0,False
99
+ o.45,0.45,32202.0,False
100
+ 0.1.2,0.12,32202.0,False
101
+ 0.00009,9e-05,9e-05,True
102
+ 0.01.,0.01,0.01,True
103
+ I don't know 8,8.0,8.0,True
104
+ "You're wrong it's not 20, it's 45",45.0,20.0,False
105
+ I don't understand why it's 19,19.0,19.0,True
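Each row of this CSV records a text2int test case: the input utterance, the expected output, the value text2int actually produced, and a boolean `score` marking agreement. A short, illustrative sketch (not part of the commit) for summarizing overall accuracy from the file; the path assumes the repo layout shown here:

    # Summarize text2int evaluation results with pandas
    import pandas as pd

    df = pd.read_csv("mathtext_fastapi/data/text2int_results.csv")
    accuracy = df["score"].mean()  # `score` is True when text2int matched the expected output
    print(f"{df['score'].sum()} / {len(df)} correct ({accuracy:.1%})")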
mathtext_fastapi/intent_classification.py ADDED
@@ -0,0 +1,56 @@
+ import numpy as np
+ import pandas as pd
+
+ from pathlib import Path
+ from sentence_transformers import SentenceTransformer
+ from sklearn.linear_model import LogisticRegression
+ from joblib import dump, load
+
+ def pickle_model(model):
+     DATA_DIR = Path(__file__).parent.parent / "mathtext_fastapi" / "data" / "intent_classification_model.joblib"
+     dump(model, DATA_DIR)
+
+
+ def create_intent_classification_model():
+     encoder = SentenceTransformer('all-MiniLM-L6-v2')
+     # path = list(Path.cwd().glob('*.csv'))
+     DATA_DIR = Path(__file__).parent.parent / "mathtext_fastapi" / "data" / "labeled_data.csv"
+
+     print("DATA_DIR")
+     print(f"{DATA_DIR}")
+
+     with open(f"{DATA_DIR}", 'r', newline='', encoding='utf-8') as f:
+         df = pd.read_csv(f)
+     df = df[df.columns[:2]]
+     df = df.dropna()
+     X_explore = np.array([list(encoder.encode(x)) for x in df['Utterance']])
+     X = np.array([list(encoder.encode(x)) for x in df['Utterance']])
+     y = df['Label']
+     model = LogisticRegression(class_weight='balanced')
+     model.fit(X, y, sample_weight=None)
+
+     print("MODEL")
+     print(model)
+
+     pickle_model(model)
+
+
+ def retrieve_intent_classification_model():
+     DATA_DIR = Path(__file__).parent.parent / "mathtext_fastapi" / "data" / "intent_classification_model.joblib"
+     model = load(DATA_DIR)
+     return model
+
+
+ encoder = SentenceTransformer('all-MiniLM-L6-v2')
+ # model = retrieve_intent_classification_model()
+ DATA_DIR = Path(__file__).parent.parent / "mathtext_fastapi" / "data" / "intent_classification_model.joblib"
+ model = load(DATA_DIR)
+
+
+ def predict_message_intent(message):
+     tokenized_utterance = np.array([list(encoder.encode(message))])
+     predicted_label = model.predict(tokenized_utterance)
+     predicted_probabilities = model.predict_proba(tokenized_utterance)
+     confidence_score = predicted_probabilities.max()
+
+     return {"type": "intent", "data": predicted_label[0], "confidence": confidence_score}
mathtext_fastapi/logging.py ADDED
@@ -0,0 +1,103 @@
1
+ import os
2
+ from datetime import datetime
3
+
4
+ from dotenv import load_dotenv
5
+ from supabase import create_client
6
+
7
+ load_dotenv()
8
+
9
+ SUPA = create_client(
10
+ os.environ.get('SUPABASE_URL'),
11
+ os.environ.get('SUPABASE_KEY')
12
+ )
13
+
14
+
15
+ def log_message_data_through_supabase_api(table_name, log_data):
16
+ return SUPA.table(table_name).insert(log_data).execute()
17
+
18
+
19
+ def format_datetime_in_isoformat(dt):
20
+ return getattr(dt.now(), 'isoformat', lambda x: None)()
21
+
22
+
23
+ def get_or_create_supabase_entry(table_name, insert_data, check_variable=None):
24
+ """ Checks if project or contact exists and adds entry if not found
25
+
26
+ Input:
27
+ - table_name: str- the name of the table in Supabase that is being examined
28
+ - insert_data: json - the data to insert
29
+ - check_variable: str/None - the specific field to check for existing match
30
+
31
+ Result
32
+ - logged_data - an object with the Supabase data
33
+ """
34
+ if table_name == 'contact':
35
+ resp = SUPA.table('contact').select("*").eq(
36
+ "original_contact_id",
37
+ insert_data['original_contact_id']
38
+ ).eq(
39
+ "project",
40
+ insert_data['project']
41
+ ).execute()
42
+ else:
43
+ resp = SUPA.table(table_name).select("*").eq(
44
+ check_variable,
45
+ insert_data[check_variable]
46
+ ).execute()
47
+
48
+ if len(resp.data) == 0:
49
+ logged_data = log_message_data_through_supabase_api(
50
+ table_name,
51
+ insert_data
52
+ )
53
+ else:
54
+ logged_data = resp
55
+ return logged_data
56
+
57
+
58
+ def prepare_message_data_for_logging(message_data, nlu_response):
59
+ """ Builds objects for each table and logs them to the database
60
+
61
+ Input:
62
+ - message_data: an object with the full message data from Turn.io/Whatsapp
63
+ """
64
+ project_data = {
65
+ 'name': "Rori",
66
+ # Autogenerated fields: id, created_at, modified_at
67
+ }
68
+ project_data_log = get_or_create_supabase_entry(
69
+ 'project',
70
+ project_data,
71
+ 'name'
72
+ )
73
+
74
+ contact_data = {
75
+ 'project': project_data_log.data[0]['id'], # FK
76
+ 'original_contact_id': message_data['contact_uuid'],
77
+ 'urn': "",
78
+ 'language_code': "en",
79
+ 'contact_inserted_at': format_datetime_in_isoformat(datetime.now())
80
+ # Autogenerated fields: id, created_at, modified_at
81
+ }
82
+ contact_data_log = get_or_create_supabase_entry('contact', contact_data)
83
+
84
+ del message_data['author_id']
85
+
86
+ message_data = {
87
+ 'contact': contact_data_log.data[0]['id'], # FK
88
+ 'original_message_id': message_data['message_id'],
89
+ 'text': message_data['message_body'],
90
+ 'direction': message_data['message_direction'],
91
+ 'sender_type': message_data['author_type'],
92
+ 'channel_type': "whatsapp / turn.io",
93
+ 'message_inserted_at': message_data['message_inserted_at'],
94
+ 'message_modified_at': message_data['message_updated_at'],
95
+ 'message_sent_at': format_datetime_in_isoformat(datetime.now()),
96
+ 'nlu_response': nlu_response,
97
+ 'request_object': message_data
98
+ # Autogenerated fields: created_at, modified_at
99
+ }
100
+ message_data_log = log_message_data_through_supabase_api(
101
+ 'message',
102
+ message_data
103
+ )
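
Note: a minimal sketch of calling the Supabase logging helpers above, assuming SUPABASE_URL and SUPABASE_KEY are set (via the environment or a .env file) and the project, contact, and message tables exist; the payload mirrors the sample message objects used elsewhere in this repo.

    from mathtext_fastapi.logging import prepare_message_data_for_logging

    message_data = {
        'author_id': '+57787919091',
        'author_type': 'OWNER',
        'contact_uuid': 'j43hk26-2hjl-43jk-hnk2-k4ljl46j0ds09',
        'message_body': 'eight',
        'message_direction': 'inbound',
        'message_id': '4kl209sd0-a7b8-2hj3-8563-3hu4a89b32',
        'message_inserted_at': '2023-01-10T02:37:28.477940Z',
        'message_updated_at': '2023-01-10T02:37:28.487319Z',
    }
    nlu_response = {'type': 'integer', 'data': 8, 'confidence': 0}

    # Inserts (or fetches) the project and contact rows, then logs the message row
    prepare_message_data_for_logging(message_data, nlu_response)
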
mathtext_fastapi/math_quiz_fsm.py ADDED
@@ -0,0 +1,58 @@
1
+ import random
2
+ from transitions import Machine
3
+
4
+
5
+ class MathQuizFSM(object):
6
+ states = [
7
+ 'quiz_start',
8
+ 'quiz_question',
9
+ 'quiz_end'
10
+ ]
11
+
12
+ transitions = [
13
+ ['ask_second_question', 'quiz_start', 'quiz_question'],
14
+ ['ask_next_question', 'quiz_question', 'quiz_question'],
15
+ ['exit', 'quiz_start', 'quiz_end'],
16
+ ['exit', 'quiz_question', 'quiz_end'],
17
+ ]
18
+
19
+ def __init__(
20
+ self,
21
+ initial_state='quiz_start',
22
+ question_nums=[2, 3],
23
+ initial_student_answer=0,
24
+ ):
25
+ # Instantiate the FSM
26
+ self.machine = Machine(
27
+ model=self,
28
+ states=MathQuizFSM.states,
29
+ transitions=MathQuizFSM.transitions,
30
+ initial=initial_state
31
+ )
32
+
33
+ # Instantiate variables necessary for tracking activity
34
+ self.question_nums = question_nums
35
+ self.correct_answer = self.question_nums[0] + self.question_nums[1]
36
+ self.student_answer = initial_student_answer
37
+ self.is_correct_answer = False
38
+ self.response_text = f"What is {self.question_nums[0]} + {self.question_nums[1]}?"
39
+
40
+ # Define functions to run on transitions
41
+ self.machine.on_enter_quiz_question('generate_math_problem')
42
+ self.machine.on_exit_quiz_question('validate_answer')
43
+
44
+ def validate_answer(self):
45
+ if self.student_answer == 'exit':
46
+ self.machine.set_state('quiz_end')
47
+ return ["Come back any time!"]
48
+ elif self.correct_answer == self.student_answer:
49
+ self.machine.set_state('quiz_question')
50
+ self.generate_math_problem()
51
+ return ['Great job!', self.response_text]
52
+ else:
53
+ return ["That's not quite right. Try again.", self.response_text]
54
+
55
+ def generate_math_problem(self):
56
+ self.question_nums = random.sample(range(1,100),2)
57
+ self.response_text = f"What is {self.question_nums[0]} + {self.question_nums[1]}"
58
+ self.correct_answer = self.question_nums[0] + self.question_nums[1]
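
Note: a short sketch of driving MathQuizFSM directly, assuming the caller sets student_answer on the instance before calling validate_answer; the follow-up question uses new random numbers.

    from mathtext_fastapi.math_quiz_fsm import MathQuizFSM

    fsm = MathQuizFSM()
    print(fsm.response_text)      # "What is 2 + 3?"
    fsm.student_answer = 5        # simulate a correct reply
    replies = fsm.validate_answer()
    print(replies)                # ['Great job!', 'What is <a> + <b>?'] with random numbers
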
mathtext_fastapi/math_subtraction_fsm.py ADDED
@@ -0,0 +1,58 @@
1
+ import random
2
+ from transitions import Machine
3
+
4
+
5
+ class MathSubtractionFSM(object):
6
+ states = [
7
+ 'quiz_start',
8
+ 'quiz_question',
9
+ 'quiz_end'
10
+ ]
11
+
12
+ transitions = [
13
+ ['ask_second_question', 'quiz_start', 'quiz_question'],
14
+ ['ask_next_question', 'quiz_question', 'quiz_question'],
15
+ ['exit', 'quiz_start', 'quiz_end'],
16
+ ['exit', 'quiz_question', 'quiz_end'],
17
+ ]
18
+
19
+ def __init__(
20
+ self,
21
+ initial_state='quiz_start',
22
+ question_nums=[4, 3],
23
+ initial_student_answer=0,
24
+ ):
25
+ # Instantiate the FSM
26
+ self.machine = Machine(
27
+ model=self,
28
+ states=MathSubtractionFSM.states,
29
+ transitions=MathSubtractionFSM.transitions,
30
+ initial=initial_state
31
+ )
32
+
33
+ # Instantiate variables necessary for tracking activity
34
+ self.question_nums = question_nums
35
+ self.correct_answer = self.question_nums[0] - self.question_nums[1]
36
+ self.student_answer = initial_student_answer
37
+ self.is_correct_answer = False
38
+ self.response_text = f"What is {self.question_nums[0]} - {self.question_nums[1]}?"
39
+
40
+ # Define functions to run on transitions
41
+ self.machine.on_enter_quiz_question('generate_math_problem')
42
+ self.machine.on_exit_quiz_question('validate_answer')
43
+
44
+ def validate_answer(self):
45
+ if self.student_answer == 'exit':
46
+ self.machine.set_state('quiz_end')
47
+ return ["Come back any time!"]
48
+ elif self.correct_answer == self.student_answer:
49
+ self.machine.set_state('quiz_question')
50
+ self.generate_math_problem()
51
+ return ['Great job!', self.response_text]
52
+ else:
53
+ return ["That's not quite right. Try again.", self.response_text]
54
+
55
+ def generate_math_problem(self):
56
+ self.question_nums = random.sample(range(1, 100), 2)
57
+ self.response_text = f"What is {self.question_nums[0]} - {self.question_nums[1]}"
58
+ self.correct_answer = self.question_nums[0] - self.question_nums[1]
mathtext_fastapi/nlu.py ADDED
@@ -0,0 +1,178 @@
1
+ from fuzzywuzzy import fuzz
2
+ from mathtext_fastapi.logging import prepare_message_data_for_logging
3
+ from mathtext.sentiment import sentiment
4
+ from mathtext.text2int import text2int
5
+ from mathtext_fastapi.intent_classification import create_intent_classification_model, retrieve_intent_classification_model, predict_message_intent
6
+ import re
7
+
8
+
9
+ def build_nlu_response_object(type, data, confidence):
10
+ """ Turns nlu results into an object to send back to Turn.io
11
+ Inputs
12
+ - type: str - the type of nlu result (integer or sentiment)
13
+ - data: str/int - the value produced by the nlu run (e.g. the detected number or sentiment label)
14
+ - confidence: float - the nlu confidence score (sentiment) or 0 (integer)
15
+
16
+ >>> build_nlu_response_object('integer', 8, 0)
17
+ {'type': 'integer', 'data': 8, 'confidence': 0}
18
+
19
+ >>> build_nlu_response_object('sentiment', 'POSITIVE', 0.99)
20
+ {'type': 'sentiment', 'data': 'POSITIVE', 'confidence': 0.99}
21
+ """
22
+ return {'type': type, 'data': data, 'confidence': confidence}
23
+
24
+
25
+ # def test_for_float_or_int(message_data, message_text):
26
+ # nlu_response = {}
27
+ # if type(message_text) == int or type(message_text) == float:
28
+ # nlu_response = build_nlu_response_object('integer', message_text, '')
29
+ # prepare_message_data_for_logging(message_data, nlu_response)
30
+ # return nlu_response
31
+
32
+
33
+ def test_for_number_sequence(message_text_arr, message_data, message_text):
34
+ """ Determines if the student's message is a sequence of numbers
35
+
36
+ >>> test_for_number_sequence(['1','2','3'], {"author_id": "57787919091", "author_type": "OWNER", "contact_uuid": "df78gsdf78df", "message_body": "I am tired", "message_direction": "inbound", "message_id": "dfgha789789ag9ga", "message_inserted_at": "2023-01-10T02:37:28.487319Z", "message_updated_at": "2023-01-10T02:37:28.487319Z"}, '1, 2, 3')
37
+ {'type': 'integer', 'data': '1,2,3', 'confidence': 0}
38
+
39
+ >>> test_for_number_sequence(['a','b','c'], {"author_id": "57787919091", "author_type": "OWNER", "contact_uuid": "df78gsdf78df", "message_body": "I am tired", "message_direction": "inbound", "message_id": "dfgha789789ag9ga", "message_inserted_at": "2023-01-10T02:37:28.487319Z", "message_updated_at": "2023-01-10T02:37:28.487319Z"}, 'a, b, c')
40
+ {}
41
+ """
42
+ nlu_response = {}
43
+ if all(ele.isdigit() for ele in message_text_arr):
44
+ nlu_response = build_nlu_response_object(
45
+ 'integer',
46
+ ','.join(message_text_arr),
47
+ 0
48
+ )
49
+ prepare_message_data_for_logging(message_data, nlu_response)
50
+ return nlu_response
51
+
52
+
53
+ def run_text2int_on_each_list_item(message_text_arr):
54
+ """ Attempts to convert each list item to an integer
55
+
56
+ Input
57
+ - message_text_arr: list - a set of text extracted from the student message
58
+
59
+ Output
60
+ - student_response_arr: list - a set of integers (32202 for error code)
61
+
62
+ >>> run_text2int_on_each_list_item(['1','2','3'])
63
+ [1, 2, 3]
64
+ """
65
+ student_response_arr = []
66
+ for student_response in message_text_arr:
67
+ int_api_resp = text2int(student_response.lower())
68
+ student_response_arr.append(int_api_resp)
69
+ return student_response_arr
70
+
71
+
72
+ def run_sentiment_analysis(message_text):
73
+ """ Evaluates the sentiment of a student message
74
+
75
+ >>> run_sentiment_analysis("I am tired")
76
+ [{'label': 'NEGATIVE', 'score': 0.9997807145118713}]
77
+
78
+ >>> run_sentiment_analysis("I am full of joy")
79
+ [{'label': 'POSITIVE', 'score': 0.999882698059082}]
80
+ """
81
+ # TODO: Add intent labelling here
82
+ # TODO: Add logic to determine whether intent labeling or sentiment analysis is more appropriate (probably default to intent labeling)
83
+ return sentiment(message_text)
84
+
85
+
86
+ def run_intent_classification(message_text):
87
+ """ Process a student's message using basic fuzzy text comparison
88
+
89
+ >>> run_intent_classification("exit")
90
+ {'type': 'intent', 'data': 'exit', 'confidence': 1.0}
91
+ >>> run_intent_classification("exi")
92
+ {'type': 'intent', 'data': 'exit', 'confidence': 0.86}
93
+ >>> run_intent_classification("eas")
94
+ {'type': 'intent', 'data': '', 'confidence': 0}
95
+ >>> run_intent_classification("hard")
96
+ {'type': 'intent', 'data': '', 'confidence': 0}
97
+ >>> run_intent_classification("hardier")
98
+ {'type': 'intent', 'data': 'harder', 'confidence': 0.92}
99
+ """
100
+ label = ''
101
+ ratio = 0
102
+ nlu_response = {'type': 'intent', 'data': label, 'confidence': ratio}
103
+ commands = [
104
+ 'easier',
105
+ 'exit',
106
+ 'harder',
107
+ 'hint',
108
+ 'next',
109
+ 'stop',
110
+ ]
111
+
112
+ for command in commands:
113
+ try:
114
+ ratio = fuzz.ratio(command, message_text.lower())
115
+ except:
116
+ ratio = 0
117
+ if ratio > 80:
118
+ nlu_response['data'] = command
119
+ nlu_response['confidence'] = ratio / 100
120
+
121
+ return nlu_response
122
+
123
+
124
+ def evaluate_message_with_nlu(message_data):
125
+ """ Process a student's message using NLU functions and send the result
126
+
127
+ >>> evaluate_message_with_nlu({"author_id": "57787919091", "author_type": "OWNER", "contact_uuid": "df78gsdf78df", "message_body": "8", "message_direction": "inbound", "message_id": "dfgha789789ag9ga", "message_inserted_at": "2023-01-10T02:37:28.487319Z", "message_updated_at": "2023-01-10T02:37:28.487319Z"})
128
+ {'type': 'integer', 'data': 8, 'confidence': 0}
129
+
130
+ >>> evaluate_message_with_nlu({"author_id": "57787919091", "author_type": "OWNER", "contact_uuid": "df78gsdf78df", "message_body": "I am tired", "message_direction": "inbound", "message_id": "dfgha789789ag9ga", "message_inserted_at": "2023-01-10T02:37:28.487319Z", "message_updated_at": "2023-01-10T02:37:28.487319Z"})
131
+ {'type': 'sentiment', 'data': 'NEGATIVE', 'confidence': 0.9997807145118713}
132
+ """
133
+ # Keeps system working with two different inputs - full and filtered @event object
134
+ try:
135
+ message_text = str(message_data['message_body'])
136
+ except KeyError:
137
+ message_data = {
138
+ 'author_id': message_data['message']['_vnd']['v1']['chat']['owner'],
139
+ 'author_type': message_data['message']['_vnd']['v1']['author']['type'],
140
+ 'contact_uuid': message_data['message']['_vnd']['v1']['chat']['contact_uuid'],
141
+ 'message_body': message_data['message']['text']['body'],
142
+ 'message_direction': message_data['message']['_vnd']['v1']['direction'],
143
+ 'message_id': message_data['message']['id'],
144
+ 'message_inserted_at': message_data['message']['_vnd']['v1']['chat']['inserted_at'],
145
+ 'message_updated_at': message_data['message']['_vnd']['v1']['chat']['updated_at'],
146
+ }
147
+ message_text = str(message_data['message_body'])
148
+
149
+ # Run intent classification only for keywords
150
+ intent_api_response = run_intent_classification(message_text)
151
+ if intent_api_response['data']:
152
+ prepare_message_data_for_logging(message_data, intent_api_response)
153
+ return intent_api_response
154
+
155
+ number_api_resp = text2int(message_text.lower())
156
+
157
+ if number_api_resp == 32202:
158
+ # Run intent classification with logistic regression model
159
+ predicted_label = predict_message_intent(message_text)
160
+ if predicted_label['confidence'] > 0.01:
161
+ nlu_response = predicted_label
162
+ else:
163
+ # Run sentiment analysis
164
+ sentiment_api_resp = sentiment(message_text)
165
+ nlu_response = build_nlu_response_object(
166
+ 'sentiment',
167
+ sentiment_api_resp[0]['label'],
168
+ sentiment_api_resp[0]['score']
169
+ )
170
+ else:
171
+ nlu_response = build_nlu_response_object(
172
+ 'integer',
173
+ number_api_resp,
174
+ 0
175
+ )
176
+
177
+ prepare_message_data_for_logging(message_data, nlu_response)
178
+ return nlu_response
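
Note: a minimal sketch of exercising the NLU entry points above; run_intent_classification needs no model or database, while evaluate_message_with_nlu also logs to Supabase as a side effect, so it assumes the logging environment variables are configured.

    from mathtext_fastapi.nlu import evaluate_message_with_nlu, run_intent_classification

    # Fuzzy keyword matching only
    print(run_intent_classification("exit"))
    # {'type': 'intent', 'data': 'exit', 'confidence': 1.0}

    message_data = {
        'author_id': '+57787919091',
        'author_type': 'OWNER',
        'contact_uuid': 'j43hk26-2hjl-43jk-hnk2-k4ljl46j0ds09',
        'message_body': 'eight',
        'message_direction': 'inbound',
        'message_id': '4kl209sd0-a7b8-2hj3-8563-3hu4a89b32',
        'message_inserted_at': '2023-01-10T02:37:28.477940Z',
        'message_updated_at': '2023-01-10T02:37:28.487319Z',
    }
    # "eight" is converted to a number by text2int
    print(evaluate_message_with_nlu(message_data))
    # expected: {'type': 'integer', 'data': 8, 'confidence': 0}
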
pyproject.toml ADDED
@@ -0,0 +1,43 @@
1
+ [tool.poetry]
2
+ name = "MathText_FastAPI"
3
+ version = "0.0.1"
4
+ authors = [
5
+ "Sebastian Larsen <[email protected]>",
6
+ "Çetin ÇAKIR <[email protected]>",
7
+ "Hobson Lane <[email protected]>",
8
+ ]
9
+ description = "Natural Language Understanding (text processing) for math symbols, digits, and words with a Gradio user interface and REST API."
10
+ readme = "README.md"
11
+ # requires-python = ">=3.8"
12
+ license = "AGPL-3.0-or-later"
13
+ classifiers = [
14
+ "Programming Language :: Python :: 3",
15
+ "Programming Language :: Python :: 3.8",
16
+ "Programming Language :: Python :: 3.9",
17
+ "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
18
+ "Operating System :: OS Independent",
19
+ ]
20
+
21
+
22
+ [tool.poetry.dependencies]
23
+ mathtext = {git = "https://gitlab.com/tangibleai/community/mathtext", rev = "main"}
24
+ fastapi = "0.74.*"
25
+ pydantic = "*"
26
+ python = "^3.8,<3.10"
27
+ requests = "2.27.*"
28
+ sentencepiece = "0.1.*"
29
+ supabase = "*"
30
+ uvicorn = "0.17.*"
31
+
32
+ [tool.poetry.group.dev.dependencies]
33
+ pytest = "^7.2"
34
+
35
+ [build-system]
36
+ requires = ["poetry-core"]
37
+ build-backend = "poetry.core.masonry.api"
38
+
39
+ # [build-system]
40
+ # requires = ["hatchling"]
41
+ # build-backend = "hatchling.build"
42
+
43
+ # repository = "https://gitlab.com/tangibleai/community/mathtext-fastapi"
requirements.txt ADDED
@@ -0,0 +1,16 @@
1
+ dill
2
+ en-core-web-sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.4.1/en_core_web_sm-3.4.1-py3-none-any.whl
3
+ fuzzywuzzy
4
+ jsonpickle
5
+ mathtext @ git+https://gitlab.com/tangibleai/community/mathtext@main
6
+ fastapi==0.74.*
7
+ pydantic==1.10.*
8
+ python-Levenshtein
9
+ requests==2.27.*
10
+ sentencepiece==0.1.*
11
+ sentence-transformers
12
+ sentry-sdk[fastapi]
13
+ supabase
14
+ transitions
15
+ uvicorn==0.17.*
16
+
scripts/__init__.py ADDED
File without changes
scripts/api_scaling.py ADDED
@@ -0,0 +1,96 @@
1
+ """https://zetcode.com/python/concurrent-http-requests/"""
2
+
3
+ import asyncio
4
+ import random
5
+ import time
6
+ import pandas as pd
7
+ import httpx
8
+ from os.path import exists
9
+
10
+ NUMBER_OF_CALLS = 1
11
+
12
+ headers = {"Content-Type": "application/json; charset=utf-8"}
13
+
14
+ # base_url = "https://tangibleai-mathtext-fastapi.hf.space/{endpoint}"
15
+ base_url = "http://localhost:7860/run/{endpoint}"
16
+
17
+ data_list_1 = {
18
+ "endpoint": "text2int",
19
+ "test_data": [
20
+ "one hundred forty five",
21
+ "twenty thousand nine hundred fifty",
22
+ "one hundred forty five",
23
+ "nine hundred eighty three",
24
+ "five million",
25
+ ]
26
+ }
27
+
28
+ data_list_2 = {
29
+ "endpoint": "text2int-preprocessed",
30
+ "test_data": [
31
+ "one hundred forty five",
32
+ "twenty thousand nine hundred fifty",
33
+ "one hundred forty five",
34
+ "nine hundred eighty three",
35
+ "five million",
36
+ ]
37
+ }
38
+ data_list_3 = {
39
+ "endpoint": "sentiment-analysis",
40
+ "test_data": [
41
+ "Totally agree",
42
+ "I like it",
43
+ "No more",
44
+ "I am not sure",
45
+ "Never",
46
+ ]
47
+ }
48
+
49
+
50
+ # async call to endpoint
51
+ async def call_api(url, data, call_number, number_of_calls):
52
+ json = {"data": [data]}
53
+ async with httpx.AsyncClient() as client:
54
+ start = time.perf_counter() # Used perf_counter for more precise result.
55
+ response = await client.post(url=url, headers=headers, json=json, timeout=30)
56
+ end = time.perf_counter()
57
+ return {
58
+ "endpoint": url.split("/")[-1],
59
+ "test data": data,
60
+ "status code": response.status_code,
61
+ "response": response.json().get("data"),
62
+ "call number": call_number,
63
+ "number of calls": number_of_calls,
64
+ "start": start.__round__(4),
65
+ "end": end.__round__(4),
66
+ "delay": (end - start).__round__(4)
67
+ }
68
+
69
+
70
+ data_lists = [data_list_1, data_list_2, data_list_3]
71
+
72
+ results = []
73
+
74
+
75
+ async def main(number_of_calls):
76
+ for data_list in data_lists:
77
+ calls = []
78
+ for call_number in range(1, number_of_calls + 1):
79
+ url = base_url.format(endpoint=data_list["endpoint"])
80
+ data = random.choice(data_list["test_data"])
81
+ calls.append(call_api(url, data, call_number, number_of_calls))
82
+ r = await asyncio.gather(*calls)
83
+ results.extend(r)
84
+
85
+
86
+
87
+ start = time.perf_counter()
88
+ asyncio.run(main(NUMBER_OF_CALLS))
89
+ end = time.perf_counter()
90
+ print(end-start)
91
+ df = pd.DataFrame(results)
92
+
93
+ if exists("call_history.csv"):
94
+ df.to_csv(path_or_buf="call_history.csv", mode="a", header=False, index=False)
95
+ else:
96
+ df.to_csv(path_or_buf="call_history.csv", mode="w", header=True, index=False)
scripts/api_scaling.sh ADDED
@@ -0,0 +1,83 @@
1
+ #! /bin/env bash
2
+
3
+ LOG_FILE_NAME="call_history_bash.csv"
4
+
5
+ if [[ ! -f "$LOG_FILE_NAME" ]]; then
6
+ # Create the header row of column names if the file does not exist
7
+ echo "student_id;active_students;endpoint;inputs;outputs;started;finished" >$LOG_FILE_NAME
8
+ fi
9
+
10
+ data_list_1() {
11
+ responses=(
12
+ "one hundred forty five"
13
+ "twenty thousand nine hundred fifty"
14
+ "one hundred forty five"
15
+ "nine hundred eighty three"
16
+ "five million"
17
+ )
18
+ echo "${responses[$1]}"
19
+ }
20
+
21
+ data_list_2() {
22
+ responses=(
23
+ "Totally agree"
24
+ "I like it"
25
+ "No more"
26
+ "I am not sure"
27
+ "Never"
28
+ )
29
+ echo "${responses[$1]}"
30
+ }
31
+
32
+ # endpoints: "text2int" "sentiment-analysis"
33
+ # selected endpoint to test
34
+ endpoint="sentiment-analysis"
35
+
36
+ create_random_delay() {
37
+ # creates a random delay for given arguments
38
+ echo "scale=8; $RANDOM/32768*$1" | bc
39
+ }
40
+
41
+ simulate_student() {
42
+ # Student simulator waits randomly between 0-10s after an interaction.
43
+ # Based on 100 interactions per student
44
+ for i in {1..100}; do
45
+
46
+ random_value=$((RANDOM % 5))
47
+ text=$(data_list_2 $random_value)
48
+ data='{"data": ["'$text'"]}'
49
+
50
+ start_=$(date +"%F %T.%6N")
51
+
52
+ url="https://tangibleai-mathtext-fastapi.hf.space/$3"
53
+ response=$(curl --silent --connect-timeout 30 --max-time 30 -X POST "$url" -H 'Content-Type: application/json' -d "$data")
54
+
55
+ if [[ "$response" == *"Time-out"* ]]; then
56
+ echo "$response" >>bad_response.txt
57
+ response="504 Gateway Time-out"
58
+ elif [[ -z "$response" ]]; then
59
+ echo "No response" >>bad_response.txt
60
+ response="504 Gateway Time-out"
61
+ fi
62
+
63
+ end_=$(date +"%F %T.%6N")
64
+
65
+ printf "%s;%s;%s;%s;%s;%s;%s\n" "$1" "$2" "$3" "$data" "$response" "$start_" "$end_" >>$LOG_FILE_NAME
66
+ sleep "$(create_random_delay 10)"
67
+
68
+ done
69
+ }
70
+
71
+ echo "start: $(date)"
72
+
73
+ active_students=250 # the number of students using the system at the same time
74
+
75
+ i=1
76
+ while [[ "$i" -le "$active_students" ]]; do
77
+ simulate_student "student$i" "$active_students" "$endpoint" &
78
+ sleep "$(create_random_delay 1)" # adding a random delay between students
79
+ i=$(("$i" + 1))
80
+ done
81
+
82
+ wait
83
+ echo "end: $(date)"
scripts/build.sh ADDED
@@ -0,0 +1,8 @@
1
+ deactivate
2
+ pip install virtualenv
3
+ rm -rf .venv
4
+ python3.9 -m virtualenv --python 3.9 .venv
5
+ # pip install --upgrade scikit-learn
6
+ # pip install --upgrade transformers
7
+ # pip install --upgrade pandas
8
+ pip install --upgrade -e .
scripts/make_request.py ADDED
@@ -0,0 +1,194 @@
1
+ import json
2
+ import requests
3
+
4
+
5
+ def add_message_text_to_sample_object(message_text):
6
+ """
7
+ Builds a sample request object using an example of a student answer
8
+
9
+ Input
10
+ - message_text: str - an example of user input to test
11
+
12
+ Example Input
13
+ "test message"
14
+
15
+ Output
16
+ - b_string: json b-string - simulated Turn.io message data
17
+
18
+ Example Output
19
+ b'{"context": "hi", "message_data": {"author_id": "+57787919091", "author_type": "OWNER", "contact_uuid": "j43hk26-2hjl-43jk-hnk2-k4ljl46j0ds09", "message_body": "test message", "message_direction": "inbound", "message_id": "4kl209sd0-a7b8-2hj3-8563-3hu4a89b32", "message_inserted_at": "2023-01-10T02:37:28.477940Z", "message_updated_at": "2023-01-10T02:37:28.487319Z"}}'
20
+
21
+ """
22
+ message_data = '{' + f'"author_id": "+57787919091", "author_type": "OWNER", "contact_uuid": "j43hk26-2hjl-43jk-hnk2-k4ljl46j0ds09", "message_body": "{message_text}", "message_direction": "inbound", "message_id": "4kl209sd0-a7b8-2hj3-8563-3hu4a89b32", "message_inserted_at": "2023-01-10T02:37:28.477940Z", "message_updated_at": "2023-01-10T02:37:28.487319Z"' + '}'
23
+ # context_data = '{' + '"user":"", "state":"addition-question-sequence", "bot_message":"", "user_message":"{message_text}"' + '}'
24
+
25
+ context_data = '{' + '"user":"", "state":"start-conversation", "bot_message":"", "user_message":"{message_text}"' + '}'
26
+
27
+ # context_data = '{' + '"user":"", "state":"addition-question-sequence", "bot_message":"", "user_message":"{message_text}","text": "What is 2+3?","question_numbers": [4,3],"right_answer": 7,"number_correct": 2, "number_incorrect": 0, "hints_used": 0, "level": "easy"' + '}'
28
+
29
+ json_string = '{' + f'"context_data": {context_data}, "message_data": {message_data}' + '}'
30
+ b_string = json_string.encode("utf-8")
31
+
32
+ return b_string
33
+
34
+ # """
35
+ # "text": "What is 2+3?",
36
+ # "question_numbers": [2,3],
37
+ # "right_answer": 5,
38
+ # "number_correct": 2,
39
+ # "hints_used": 0,
40
+ # """
41
+
42
+
43
+ def run_simulated_request(endpoint, sample_answer, context=None):
44
+ print(f"Case: {sample_answer}")
45
+ b_string = add_message_text_to_sample_object(sample_answer)
46
+
47
+ if endpoint in ('sentiment-analysis', 'text2int', 'intent-classification'):
48
+ request = requests.post(
49
+ url=f'http://localhost:7860/{endpoint}',
50
+ json={'content': sample_answer}
51
+ ).json()
52
+ else:
53
+ request = requests.post(
54
+ url=f'http://localhost:7860/{endpoint}',
55
+ data=b_string
56
+ ).json()
57
+
58
+ print(request)
59
+
60
+
61
+ # run_simulated_request('intent-classification', 'exit')
62
+ # run_simulated_request('intent-classification', "I'm not sure")
63
+ # run_simulated_request('sentiment-analysis', 'I reject it')
64
+ # run_simulated_request('text2int', 'seven thousand nine hundred fifty seven')
65
+ run_simulated_request('nlu', 'test message')
66
+ run_simulated_request('nlu', 'eight')
67
+ run_simulated_request('nlu', 'is it 8')
68
+ run_simulated_request('nlu', 'can I know how its 0.5')
69
+ run_simulated_request('nlu', 'eight, nine, ten')
70
+ run_simulated_request('nlu', '8, 9, 10')
71
+ run_simulated_request('nlu', '8')
72
+ run_simulated_request('nlu', "I don't know")
73
+ run_simulated_request('nlu', "I don't know eight")
74
+ run_simulated_request('nlu', "I don't 9")
75
+ run_simulated_request('nlu', "0.2")
76
+ run_simulated_request('nlu', 'Today is a wonderful day')
77
+ run_simulated_request('nlu', 'IDK 5?')
78
+ # run_simulated_request('manager', '')
79
+ # run_simulated_request('manager', 'add')
80
+ # run_simulated_request('manager', 'subtract')
81
+ # run_simulated_request("question", {
82
+ # 'number_correct': 0,
83
+ # 'number_incorrect': 0,
84
+ # 'level': 'easy'
85
+ # })
86
+ # run_simulated_request("hint", {
87
+ # 'question_numbers': [1, 2, 3],
88
+ # 'right_answer': 3,
89
+ # 'number_correct': 0,
90
+ # 'number_incorrect': 0,
91
+ # 'level': 'easy',
92
+ # 'hints_used': 0
93
+ # })
94
+ # run_simulated_request("generate_question", {
95
+ # 'level': 'medium'
96
+ # })
97
+ # run_simulated_request("numbers_by_level", {
98
+ # 'level': 'medium'
99
+ # })
100
+ # run_simulated_request("number_sequence", {
101
+ # "current_number": 10,
102
+ # "ordinal_number": 2,
103
+ # "times": 1
104
+ # })
105
+ # run_simulated_request("level", {
106
+ # "current_level": "hard",
107
+ # "level_up": False
108
+ # })
109
+ # run_simulated_request('manager', 'exit')
110
+
111
+
112
+ # Example of simplified object received from Turn.io stacks
113
+ # This is a contrived example to show the structure, not an actual state
114
+ # NOTE: This is actually a bstring, not a dict
115
+ simplified_json = {
116
+ "context": {
117
+ "user": "+57787919091",
118
+ "state": "answer-addition-problem",
119
+ "bot_message": "What is 2+2?",
120
+ "user_message": "eight",
121
+ "type": "ask"
122
+ },
123
+ "message_data": {
124
+ "author_id": "+57787919091",
125
+ "author_type": "OWNER",
126
+ "contact_uuid": "j43hk26-2hjl-43jk-hnk2-k4ljl46j0ds09",
127
+ "message_body": "eight",
128
+ "message_direction": "inbound",
129
+ "message_id": "4kl209sd0-a7b8-2hj3-8563-3hu4a89b32",
130
+ "message_inserted_at": "2023-01-10T02:37:28.477940Z",
131
+ "message_updated_at": "2023-01-10T02:37:28.487319Z"
132
+ }
133
+ }
134
+
135
+
136
+ # Full example of event data from Turn.io
137
+ # simplified_json is built from this in Turn.io
138
+ # full_json = {
139
+ # 'message': {
140
+ # '_vnd': {
141
+ # 'v1': {
142
+ # 'author': {
143
+ # 'id': 57787919091,
144
+ # 'name': 'GT',
145
+ # 'type': 'OWNER'
146
+ # },
147
+ # 'card_uuid': None,
148
+ # 'chat': {
149
+ # 'assigned_to': {
150
+ # 'id': 'jhk151kl-hj42-3752-3hjk-h4jk6hjkk2',
151
+ # 'name': 'Greg Thompson',
152
+ # 'type': 'OPERATOR'
153
+ # },
154
+ # 'contact_uuid': 'j43hk26-2hjl-43jk-hnk2-k4ljl46j0ds09',
155
+ # 'inserted_at': '2022-07-05T04:00:34.033522Z',
156
+ # 'owner': '+57787919091',
157
+ # 'permalink': 'https://app.turn.io/c/4kl209sd0-a7b8-2hj3-8563-3hu4a89b32',
158
+ # 'state': 'OPEN',
159
+ # 'state_reason': 'Re-opened by inbound message.',
160
+ # 'unread_count': 19,
161
+ # 'updated_at': '2023-01-10T02:37:28.487319Z',
162
+ # 'uuid': '4kl209sd0-a7b8-2hj3-8563-3hu4a89b32'
163
+ # },
164
+ # 'direction': 'inbound',
165
+ # 'faq_uuid': None,
166
+ # 'in_reply_to': None,
167
+ # 'inserted_at': '2023-01-10T02:37:28.477940Z',
168
+ # 'labels': [{
169
+ # 'confidence': 0.506479332,
170
+ # 'metadata': {
171
+ # 'nlu': {
172
+ # 'confidence': 0.506479332,
173
+ # 'intent': 'question',
174
+ # 'model_name': 'nlu-general-spacy-ngrams-20191014'
175
+ # }
176
+ # },
177
+ # 'uuid': 'ha7890s2k-hjk2-2476-s8d9-fh9779a8a9ds',
178
+ # 'value': 'Unclassified'
179
+ # }],
180
+ # 'last_status': None,
181
+ # 'last_status_timestamp': None,
182
+ # 'on_fallback_channel': False,
183
+ # 'rendered_content': None,
184
+ # 'uuid': 's8df79zhws-h89s-hj23-7s8d-thb248d9bh2qn'
185
+ # }
186
+ # },
187
+ # 'from': 57787919091,
188
+ # 'id': 'hsjkthzZGehkzs09sijWA3',
189
+ # 'text': {'body': 'eight'},
190
+ # 'timestamp': 1673318248,
191
+ # 'type': 'text'
192
+ # },
193
+ # 'type': 'message'
194
+ # }
scripts/make_request.sh ADDED
@@ -0,0 +1,46 @@
1
+ #root_url="localhost:7860"
2
+ root_url="https://tangibleai-mathtext-fastapi.hf.space"
3
+
4
+ ep="/"
5
+ url=$root_url$ep
6
+ data=''
7
+
8
+ response=$(curl --silent -X GET "$url" -H 'Content-Type: application/json')
9
+
10
+ echo "URL: $url"
11
+ echo "Data: $data"
12
+ echo "Response: $response"
13
+ echo
14
+
15
+ ep="/hello"
16
+ url=$root_url$ep
17
+ data='{"content":"Rori"}'
18
+
19
+ response=$(curl --silent -X POST "$url" -H 'Content-Type: application/json' -d "$data")
20
+
21
+ echo "URL: $url"
22
+ echo "Data: $data"
23
+ echo "Response: $response"
24
+ echo
25
+
26
+ ep="/sentiment-analysis"
27
+ url=$root_url$ep
28
+ data='{"content":"I am happy with it!"}'
29
+
30
+ response=$(curl --silent -X POST "$url" -H 'Content-Type: application/json' -d "$data")
31
+
32
+ echo "URL: $url"
33
+ echo "Data: $data"
34
+ echo "Response: $response"
35
+ echo
36
+
37
+ ep="/text2int"
38
+ url=$root_url$ep
39
+ data='{"content":"one hundred forty two"}'
40
+
41
+ response=$(curl --silent -X POST "$url" -H 'Content-Type: application/json' -d "$data")
42
+
43
+ echo "URL: $url"
44
+ echo "Data: $data"
45
+ echo "Response: $response"
46
+ echo
scripts/plot_calls.py ADDED
@@ -0,0 +1,116 @@
1
+ import math
2
+ from datetime import datetime
3
+
4
+ import matplotlib.pyplot as plt
5
+ import pandas as pd
6
+
7
+ pd.set_option('display.max_columns', None)
8
+ pd.set_option('display.max_rows', None)
9
+
10
+ log_files = [
11
+ 'call_history_sentiment_1_bash.csv',
12
+ 'call_history_text2int_1_bash.csv',
13
+ ]
14
+
15
+ for log_file in log_files:
16
+ path_ = f"./data/{log_file}"
17
+ df = pd.read_csv(filepath_or_buffer=path_, sep=";")
18
+ df["finished_ts"] = df["finished"].apply(
19
+ lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S.%f").timestamp())
20
+ df["started_ts"] = df["started"].apply(
21
+ lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S.%f").timestamp())
22
+ df["elapsed"] = df["finished_ts"] - df["started_ts"]
23
+
24
+ df["success"] = df["outputs"].apply(lambda x: 0 if "Time-out" in x else 1)
25
+
26
+ student_numbers = sorted(df['active_students'].unique())
27
+
28
+ bins_dict = dict() # bins size for each group
29
+ min_finished_dict = dict() # zero time for each group
30
+
31
+ for student_number in student_numbers:
32
+ # for each student group calculates bins size and zero time
33
+ min_finished = df["finished_ts"][df["active_students"] == student_number].min()
34
+ max_finished = df["finished_ts"][df["active_students"] == student_number].max()
35
+ bins = math.ceil(max_finished - min_finished)
36
+ bins_dict.update({student_number: bins})
37
+ min_finished_dict.update({student_number: min_finished})
38
+ print(f"student number: {student_number}")
39
+ print(f"min finished: {min_finished}")
40
+ print(f"max finished: {max_finished}")
41
+ print(f"bins finished seconds: {bins}, minutes: {bins / 60}")
42
+
43
+ df["time_line"] = None
44
+ for student_number in student_numbers:
45
+ # calculates time-line for each student group
46
+ df["time_line"] = df.apply(
47
+ lambda x: x["finished_ts"] - min_finished_dict[student_number]
48
+ if x["active_students"] == student_number
49
+ else x["time_line"],
50
+ axis=1
51
+ )
52
+
53
+ # creates a '.csv' from the dataframe
54
+ df.to_csv(f"./data/processed_{log_file}", index=False, sep=";")
55
+
56
+ result = df.groupby(['active_students', 'success']) \
57
+ .agg({
58
+ 'elapsed': ['mean', 'median', 'min', 'max'],
59
+ 'success': ['count'],
60
+ })
61
+
62
+ print(f"Results for {log_file}")
63
+ print(result, "\n")
64
+
65
+ title = None
66
+ if "sentiment" in log_file.lower():
67
+ title = "API result for 'sentiment-analysis' endpoint"
68
+ elif "text2int" in log_file.lower():
69
+ title = "API result for 'text2int' endpoint"
70
+
71
+ for student_number in student_numbers:
72
+ # Prints percentage of the successful and failed calls
73
+ try:
74
+ failed_calls = result.loc[(student_number, 0), 'success'][0]
75
+ except:
76
+ failed_calls = 0
77
+ successful_calls = result.loc[(student_number, 1), 'success'][0]
78
+ percentage = (successful_calls / (failed_calls + successful_calls)) * 100
79
+ print(f"Percentage of successful API calls for {student_number} students: {percentage.__round__(2)}")
80
+
81
+ rows = len(student_numbers)
82
+
83
+ fig, axs = plt.subplots(rows, 2) # (rows, columns)
84
+
85
+ for index, student_number in enumerate(student_numbers):
86
+ # creates a boxplot for each test group
87
+ data = df[df["active_students"] == student_number]
88
+ axs[index][0].boxplot(x=data["elapsed"]) # axs[row][column]
89
+ # axs[index][0].set_title(f'Boxplot for {student_number} students')
90
+ axs[index][0].set_xlabel(f'student number {student_number}')
91
+ axs[index][0].set_ylabel('Elapsed time (s)')
92
+
93
+ # creates a histogram for each test group
94
+ axs[index][1].hist(x=data["elapsed"], bins=25) # axs[row][column]
95
+ # axs[index][1].set_title(f'Histogram for {student_number} students')
96
+ axs[index][1].set_xlabel('seconds')
97
+ axs[index][1].set_ylabel('Count of API calls')
98
+
99
+ fig.suptitle(title, fontsize=16)
100
+
101
+ fig, axs = plt.subplots(rows, 1) # (rows, columns)
102
+
103
+ for index, student_number in enumerate(student_numbers):
104
+ # creates a histogram and shows API calls on a timeline for each test group
105
+ data = df[df["active_students"] == student_number]
106
+
107
+ print(data["time_line"].head(10))
108
+
109
+ axs[index].hist(x=data["time_line"], bins=bins_dict[student_number]) # axs[row][column]
110
+ # axs[index][1].set_title(f'Histogram for {student_number} students')
111
+ axs[index].set_xlabel('seconds')
112
+ axs[index].set_ylabel('Count of API calls')
113
+
114
+ fig.suptitle(title, fontsize=16)
115
+
116
+ plt.show()
scripts/quiz/generators.py ADDED
@@ -0,0 +1,33 @@
1
+ from .questions import generate_question_data
2
+ from .utils import get_next_level
3
+
4
+
5
+ def start_interactive_math(right_answers=0, wrong_answers=0, level="easy"):
6
+ if wrong_answers > 2:
7
+ wrong_answers = 0
8
+ right_answers = 0
9
+ level = get_next_level(level, False)
10
+ elif right_answers > 2:
11
+ right_answers = 0
12
+ wrong_answers = 0
13
+ level = get_next_level(level)
14
+
15
+ question_data = generate_question_data(level)
16
+ question = question_data['question']
17
+ right_answer = question_data['answer']
18
+ cur_num = question_data['current_number']
19
+ ord_num = question_data['ordinal_number']
20
+ times = question_data['times']
21
+
22
+ numbers_group = [cur_num, ord_num, times]
23
+ output = {
24
+ "text": question,
25
+ "question_numbers": numbers_group,
26
+ "right_answer": right_answer,
27
+ 'number_correct': right_answers,
28
+ 'number_incorrect': wrong_answers,
29
+ 'level': level,
30
+ "hints_used": 0
31
+ }
32
+ return output
33
+
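Note: a short sketch of the quiz generator above, assuming the repository root is on sys.path; the generated question and numbers are random, so the printed values vary.

    from scripts.quiz.generators import start_interactive_math

    # Three correct answers in a row move the student up from "easy" to "medium"
    state = start_interactive_math(right_answers=3, wrong_answers=0, level="easy")
    print(state["level"])          # "medium"
    print(state["text"])           # the next question text (random)
    print(state["right_answer"])   # the expected integer answer
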
scripts/quiz/hints.py ADDED
@@ -0,0 +1,32 @@
1
+ import random
2
+
3
+
4
+ def generate_hint(question_nums, right_answer, right_answers, wrong_answers, level, hints_used):
5
+ ord_num = question_nums[1] # ordinal number
6
+ equation = right_answer - 2 * ord_num - 1
7
+ min_num = equation if equation > 0 else 0
8
+ seq_before = " ".join(
9
+ [str(num) for num in range(right_answer - ord_num, min_num, -ord_num)][::-1]
10
+ ) # sequence before right answer
11
+ seq_after = " ".join(
12
+ [str(num) for num in range(right_answer + ord_num, right_answer + 2 * ord_num + 1, ord_num)]
13
+ ) # sequence after right answer
14
+ hints = [
15
+ f"What number will fill the gap in a sequence {seq_before} ... {seq_after}?",
16
+ f"What number is {ord_num} in the account after {right_answer - ord_num}?",
17
+ f"What number is {ord_num} in the account before {right_answer + ord_num}?",
18
+ f"What number is greater than {right_answer - 1} and less than {right_answer + 1}?"
19
+ ]
20
+ rand_hint = random.choice(hints)
21
+ hints_used += 1
22
+
23
+ output = {
24
+ "text": rand_hint,
25
+ "question_numbers": question_nums,
26
+ "right_answer": right_answer,
27
+ 'number_correct': right_answers,
28
+ 'number_incorrect': wrong_answers,
29
+ 'level': level,
30
+ "hints_used": hints_used
31
+ }
32
+ return output
scripts/quiz/questions.py ADDED
@@ -0,0 +1,116 @@
1
+ import random
2
+ from typing import Literal
3
+
4
+
5
+ def generate_question_data(level: Literal["easy", "medium", "hard"] = "easy"):
6
+ """generate question, its numbers and proper answer"""
7
+
8
+ nums = generate_numbers_by_level(level)
9
+ cur_num = nums['current_number'] # current number
10
+ ord_num = nums['ordinal_number'] # ordinal number
11
+ seq_up_by_one = generate_number_sequence(cur_num, ord_num=1, times=1) # sequence with ord_num = 1, times = 1
12
+
13
+ count_up_by_one_questions = [
14
+ {
15
+ "question": f"Let's practice counting. After {cur_num}, what number is next?\n{seq_up_by_one}",
16
+ "current_number": cur_num,
17
+ "ordinal_number": 1,
18
+ "times": 1,
19
+ "answer": cur_num + 1
20
+ }
21
+ ]
22
+ seq_up_by_ord = generate_number_sequence(cur_num, ord_num, times=1) # sequence with times = 1
23
+ count_up_by_ord_questions = [
24
+ {
25
+ "question": f"What number comes {ord_num} number after {cur_num}?\n{seq_up_by_ord}",
26
+ "current_number": cur_num,
27
+ "ordinal_number": ord_num,
28
+ "times": 1,
29
+ "answer": cur_num + ord_num
30
+ },
31
+ {
32
+ "question": f"If we count up {ord_num} from {cur_num}, what number is next?\n{seq_up_by_ord}",
33
+ "current_number": cur_num,
34
+ "ordinal_number": ord_num,
35
+ "times": 1,
36
+ "answer": cur_num + ord_num
37
+ }
38
+ ]
39
+ times = 1 if level == "easy" else nums['times']
40
+ times_ord_seq = generate_number_sequence(cur_num, ord_num, times)
41
+ times_ord_questions = [
42
+ {
43
+ "question": f"We're counting up by {times}s. What number is {ord_num} after {cur_num}?\n{times_ord_seq}",
44
+ "current_number": cur_num,
45
+ "ordinal_number": ord_num,
46
+ "times": times,
47
+ "answer": cur_num + ord_num * times
48
+ }
49
+ ]
50
+ times_only_seq = generate_number_sequence(cur_num, 1, times) # sequence with ordinal number = 1
51
+ times_only_questions = [
52
+ {
53
+ "question": f"Let's count up by {times}s. What number is next if we start from {cur_num}?\n{times_only_seq}",
54
+ "current_number": cur_num,
55
+ "ordinal_number": 1,
56
+ "times": times,
57
+ "answer": cur_num + times
58
+ }
59
+ ]
60
+ questions = [*count_up_by_one_questions, *count_up_by_ord_questions, *times_only_questions, *times_ord_questions]
61
+ random_choice = random.choice(questions)
62
+ return random_choice
63
+
64
+
65
+ def generate_numbers_by_level(level: Literal["easy", "medium", "hard"] = "easy"):
66
+ """generate current number, ordinal number and times parameter
67
+
68
+ returns
69
+ dict with params:
70
+ :param current_number: current number
71
+ :param ordinal_number: the number we count up by
72
+ :param times: the number of times we count up by ordinal number"""
73
+
74
+ if level == "easy":
75
+ cur_num = random.randint(1, 8)
76
+ ord_num = random.randint(1, 2)
77
+ times = 1
78
+ elif level == "medium":
79
+ cur_num = random.randint(1, 94)
80
+ ord_num = random.randint(1, 3)
81
+ times = random.randint(1, 2)
82
+ elif level == "hard":
83
+ cur_num = random.randint(1, 488)
84
+ ord_num = random.randint(1, 4)
85
+ times = random.randint(1, 2)
86
+
87
+ return {
88
+ "current_number": cur_num,
89
+ "ordinal_number": ord_num,
90
+ "times": times
91
+ }
92
+
93
+
94
+ def generate_number_sequence(cur_num, ord_num, times=1):
95
+ """generate one of 2 sequences. For example we want 55 to be a right answer, then sequences can be:
96
+ 52 53 54 ...
97
+ ... 56 57 58
98
+
99
+ parameters
100
+ :cur_num: current number
101
+ :ord_num: ordinal number
102
+ :times: times"""
103
+ max_num = cur_num + times * ord_num
104
+
105
+ seq_before = [str(num) for num in range(max_num - times, 0, -times)][:3][::-1]
106
+ seq_after = [str(num) for num in range(max_num + times, max_num + 4 * times, times)]
107
+ seq_before.append("...")
108
+ seq_after.insert(0, "...")
109
+
110
+ seqs = []
111
+ if len(seq_before) == 4:
112
+ seqs.append(seq_before)
113
+ if len(seq_after) == 4:
114
+ seqs.append(seq_after)
115
+ rand_seq = " ".join(random.choice(seqs))
116
+ return rand_seq
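
Note: a quick sketch of the question helpers above; outputs are randomized, so the comments only show one possible result.

    from scripts.quiz.questions import generate_number_sequence, generate_question_data

    print(generate_number_sequence(7, 1, 1))   # "5 6 7 ..." or "... 9 10 11"
    question = generate_question_data(level="medium")
    print(question["question"])                # the question text with its number sequence
    print(question["answer"])                  # the expected integer answer
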
scripts/quiz/utils.py ADDED
@@ -0,0 +1,13 @@
1
+ from typing import Literal
2
+
3
+ def get_next_level(cur_level, level_up: Literal[True, False] = True):
4
+ if level_up:
5
+ if cur_level == "easy":
6
+ return "medium"
7
+ else:
8
+ return "hard"
9
+ else:
10
+ if cur_level == "medium":
11
+ return "easy"
12
+ else:
13
+ return "medium"
static/styles.css ADDED
@@ -0,0 +1,8 @@
1
+ @import url('https://fonts.googleapis.com/css2?family=Roboto:wght@300&display=swap');
2
+
3
+ body {
4
+ font-family: 'Roboto', sans-serif;
5
+ font-size: 16px;
6
+ background-color: black;
7
+ color: white;
8
+ }
templates/home.html ADDED
@@ -0,0 +1,35 @@
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8">
5
+ <title>Mathbot</title>
6
+ <link rel="stylesheet" href="{{ url_for('static', path='/styles.css') }}">
7
+ </head>
8
+ <body>
9
+ <h2>Mathbot</h2>
10
+ <h3>Created with FastAPI</h3>
11
+
12
+ <h4>To make a request with python</h4>
13
+ <pre><code>
14
+ import requests
15
+
16
+ requests.post(
17
+ url='https://tangibleai-mathtext-fastapi.hf.space/sentiment-analysis',
18
+ json={"content": "I reject it"}
19
+ ).json()
20
+
21
+ requests.post(
22
+ url='https://tangibleai-mathtext-fastapi.hf.space/text2int',
23
+ json={"content": "forty two"}
24
+ ).json()
25
+
26
+ </code></pre>
27
+
28
+ <h4>To make a request with curl</h4>
29
+ <pre><code>
30
+ curl --silent -X POST "https://tangibleai-mathtext-fastapi.hf.space/sentiment-analysis" -H 'Content-Type: application/json' -d '{"content":"I am happy with it!"}'
31
+
32
+ curl --silent -X POST "https://tangibleai-mathtext-fastapi.hf.space/text2int" -H 'Content-Type: application/json' -d '{"content":"forty two"}'
33
+ </code></pre>
34
+ </body>
35
+ </html>
tests/__init__.py ADDED
File without changes
tests/test_text2int.py ADDED
@@ -0,0 +1,57 @@
1
+ import unittest
2
+ from pathlib import Path
3
+
4
+ import pandas as pd
5
+ from fastapi.testclient import TestClient
6
+
7
+ from app import app
8
+
9
+ # The raw file URL has to be used for GitLab.
10
+ URL = "https://gitlab.com/tangibleai/community/mathtext/-/raw/main/mathtext/data/master_test_text2int.csv"
11
+
12
+ DATA_DIR = Path(__file__).parent.parent / "mathtext_fastapi" / "data"
13
+ print(DATA_DIR)
14
+
15
+ client = TestClient(app)
16
+
17
+
18
+ class TestStringMethods(unittest.TestCase):
19
+
20
+ def setUp(self):
21
+ """Creates a fastapi test client"""
22
+ self.client = TestClient(app)
23
+ self.df = pd.read_csv(URL)
24
+
25
+ def get_response_text2int(self, text):
26
+ """Makes a post request to the endpoint"""
27
+ r = None
28
+ try:
29
+ r = self.client.post("/text2int", json={"content": text}) \
30
+ .json().get("message")
31
+ except:
32
+ pass
33
+ return r
34
+
35
+ def test_endpoint_text2int(self):
36
+ """Tests if endpoint is working"""
37
+ response = self.client.post("/text2int",
38
+ json={"content": "fourteen"}
39
+ )
40
+ self.assertEqual(response.status_code, 200)
41
+
42
+ def test_acc_score_text2int(self):
43
+ """Calculates accuracy score for endpoint"""
44
+
45
+ self.df["text2int"] = self.df["input"].apply(func=self.get_response_text2int)
46
+ self.df["score"] = self.df[["output", "text2int"]].apply(
47
+ lambda row: row[0] == row[1],
48
+ axis=1
49
+ )
50
+ self.df.to_csv(f"{DATA_DIR}/text2int_results.csv", index=False)
51
+ acc_score = self.df["score"].mean().__round__(2)
52
+
53
+ self.assertGreaterEqual(acc_score, 0.5, f"Accuracy score: '{acc_score}'. Value is too low!")
54
+
55
+
56
+ if __name__ == '__main__':
57
+ unittest.main()
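
Note: the accuracy test above fetches the master CSV from GitLab, so it needs network access (and the app in app.py must import cleanly); a minimal sketch for running the suite from the repository root:

    import unittest

    # Discover and run everything under ./tests
    suite = unittest.defaultTestLoader.discover("tests")
    unittest.TextTestRunner(verbosity=2).run(suite)
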