Commit 98ba4ab: minor fix
Committed by Pierre Lepagnol
Parent(s): c50586b

This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
- Bioresponse/label.json +0 -3
- Bioresponse/rules.json +0 -3
- Bioresponse/test.json +0 -3
- Bioresponse/train.json +0 -3
- Bioresponse/valid.json +0 -3
- PhishingWebsites/label.json +0 -3
- PhishingWebsites/rules.json +0 -3
- PhishingWebsites/test.json +0 -3
- PhishingWebsites/train.json +0 -3
- PhishingWebsites/valid.json +0 -3
- agnews/readme.txt +86 -86
- agnews/test.json +2 -2
- agnews/train.json +2 -2
- agnews/valid.json +2 -2
- bank-marketing/label.json +0 -3
- bank-marketing/rules.json +0 -3
- bank-marketing/test.json +0 -3
- bank-marketing/train.json +0 -3
- bank-marketing/valid.json +0 -3
- basketball/readme.txt +17 -17
- basketball/test.json +2 -2
- basketball/train.json +2 -2
- basketball/valid.json +2 -2
- bc5cdr/test.json +2 -2
- bc5cdr/train.json +2 -2
- bc5cdr/valid.json +2 -2
- cdr/readme.txt +285 -285
- cdr/test.json +2 -2
- cdr/train.json +2 -2
- cdr/valid.json +2 -2
- census/labeled_ids.json +0 -3
- census/test.json +2 -2
- census/train.json +2 -2
- census/valid.json +2 -2
- chemprot/readme.txt +212 -212
- chemprot/test.json +2 -2
- chemprot/train.json +2 -2
- chemprot/valid.json +2 -2
- commercial/readme.txt +22 -22
- commercial/test.json +2 -2
- commercial/train.json +0 -3
- commercial/valid.json +2 -2
- conll/test.json +2 -2
- conll/train.json +2 -2
- conll/valid.json +2 -2
- imdb/readme.txt +59 -59
- imdb/test.json +2 -2
- imdb/train.json +2 -2
- imdb/valid.json +2 -2
- laptopreview/test.json +2 -2
Bioresponse/label.json
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:2faf7e495bfc38eecea5d668ff0dee281d7fa4a672416986539a97400dc9ae40
-size 24

Bioresponse/rules.json
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:0e2535a290e315ccc4d33b61657a3d127015bfa631507697b61d272e15b11d9f
-size 12127

Bioresponse/test.json
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:39f4c27b3ec82c3aba6380d2c2cc3e5d19bdded35332c96ace622ae094792356
-size 3739085

Bioresponse/train.json
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9c597e58013825c4cf6f83cd9c0126decd5c99d791561dddcb9498d428092ae6
-size 29881496

Bioresponse/valid.json
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:2e6b0adf12724a8c9212cc306cd4c5ec7d0a9137684ccd426d29ebe376de6a02
-size 3735356

PhishingWebsites/label.json
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:403571d3200240e03891ce786ab4d9b38ea679847255424682f5e9e24610b64e
-size 25

PhishingWebsites/rules.json
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:5152408d87e86a39a905b7e348b363a055be82dfb81cab995eb742e995623ec4
-size 9428

PhishingWebsites/test.json
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:3410f3625ffc2ea6bf08d5c33d2bcd9db486312d0fcf0655702ede6cc913051f
-size 231431

PhishingWebsites/train.json
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:25d42c4d2dc63a221deb3d1a7ccf42471924ab0873c6ba494ad6b877ab718898
-size 1858071

PhishingWebsites/valid.json
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9ef17933fc5cac3ab6b5773249600e8e50f3c9b9e9826f5ffcc3d9279ad60d1e
-size 231168
agnews/readme.txt
CHANGED
@@ -1,87 +1,87 @@ (old and new content are identical; shown once)

Agnews Topic classification dataset

https://github.com/weakrules/Denoise-multi-weak-sources/blob/master/rules-noisy-labels/Agnews/angews_rule.py


# Labels
"0": "World",
"1": "Sports",
"2": "Business",
"3": "Sci/Tech"


# Labeling functions (all 9 LFs are keyword LFs)

## LF1 0: world
r1 = ["atomic", "captives", "baghdad", "israeli", "iraqis", "iranian", "afghanistan", "wounding", "terrorism", "soldiers", \
      "palestinians", "palestinian", "policemen", "iraqi", "terrorist", 'north korea', 'korea', \
      'israel', 'u.n.', 'egypt', 'iran', 'iraq', 'nato', 'armed', 'peace']

## LF2 0: world
r2 = [' war ', 'prime minister', 'president', 'commander', 'minister', 'annan', "military", "militant", "kill", 'operator']

## LF3 1: sports
r3 = ["goals", "bledsoe", "coaches", "touchdowns", "kansas", "rankings", "no.", \
      "champ", "cricketers", "hockey", "champions", "quarterback", 'club', 'team', 'baseball', 'basketball', 'soccer', 'football', 'boxing', 'swimming', \
      'world cup', 'nba', "olympics", "final", "finals", 'fifa', 'racist', 'racism']

## LF4 1: sports
r4 = ['athlete', 'striker', 'defender', 'goalkeeper', 'midfielder', 'shooting guard', 'power forward', 'point guard', 'pitcher', 'catcher', 'first base', 'second base', 'third base', 'shortstop', 'fielder']

## LF5 1: sports
r5 = ['lakers', 'chelsea', 'piston', 'cavaliers', 'rockets', 'clippers', 'ronaldo', \
      'celtics', 'hawks', '76ers', 'raptors', 'pacers', 'suns', 'warriors', 'blazers', 'knicks', 'timberwolves', 'hornets', 'wizards', 'nuggets', 'mavericks', 'grizzlies', 'spurs', \
      'cowboys', 'redskins', 'falcons', 'panthers', 'eagles', 'saints', 'buccaneers', '49ers', 'cardinals', 'texans', 'seahawks', 'vikings', 'patriots', 'colts', 'jaguars', 'raiders', 'chargers', 'bengals', 'steelers', 'browns', \
      'braves', 'marlins', 'mets', 'phillies', 'cubs', 'brewers', 'cardinals', 'diamondbacks', 'rockies', 'dodgers', 'padres', 'orioles', 'sox', 'yankees', 'jays', 'sox', 'indians', 'tigers', 'royals', 'twins', 'astros', 'angels', 'athletics', 'mariners', 'rangers', \
      'arsenal', 'burnley', 'newcastle', 'leicester', 'manchester united', 'everton', 'southampton', 'hotspur', 'tottenham', 'fulham', 'watford', 'sheffield', 'crystal palace', 'derby', 'charlton', 'aston villa', 'blackburn', 'west ham', 'birmingham city', 'middlesbrough', \
      'real madrid', 'barcelona', 'villarreal', 'valencia', 'betis', 'espanyol', 'levante', 'sevilla', 'juventus', 'inter milan', 'ac milan', 'as roma', 'benfica', 'porto', 'getafe', 'bayern', 'schalke', 'bremen', 'lyon', 'paris saint', 'monaco', 'dynamo']

## LF6 3: tech
r6 = ["technology", "engineering", "science", "research", "cpu", "windows", "unix", "system", 'computing', 'compute']  # , "wireless", "chip", "pc"

## LF7 3: tech
r7 = ["google", "apple", "microsoft", "nasa", "yahoo", "intel", "dell", \
      'huawei', "ibm", "siemens", "nokia", "samsung", 'panasonic', \
      't-mobile', 'nvidia', 'adobe', 'salesforce', 'linkedin', 'silicon', 'wiki']

## LF8 2: business
r8 = ["stock", "account", "financ", "goods", "retail", 'economy', 'chairman', 'bank', 'deposit', 'economic', 'dow jones', 'index', '$', 'percent', 'interest rate', 'growth', 'profit', 'tax', 'loan', 'credit', 'invest']

## LF9 2: business
r9 = ["delta", "cola", "toyota", "costco", "gucci", 'citibank', 'airlines']
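As a rough illustration of how keyword lists like r1 through r9 become labeling functions, here is a minimal sketch; the make_keyword_lf helper, the label constants, and the plain substring matching are assumptions for the example, not code taken from the linked repository.

ABSTAIN, WORLD, SPORTS, BUSINESS, TECH = -1, 0, 1, 2, 3

def make_keyword_lf(keywords, label):
    # Vote `label` if any keyword appears in the lowercased text, else abstain.
    def lf(text):
        lowered = text.lower()
        return label if any(k in lowered for k in keywords) else ABSTAIN
    return lf

lf1 = make_keyword_lf(["atomic", "baghdad", "soldiers", "nato", "peace"], WORLD)  # subset of r1
lf3 = make_keyword_lf(["goals", "touchdowns", "hockey", "quarterback"], SPORTS)   # subset of r3

print(lf3("The quarterback threw three touchdowns in the final."))  # 1 (Sports)
print(lf1("Stocks rallied on Wall Street today."))                  # -1 (abstain)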
agnews/test.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:6b269a338214de9b3827d3056a9ae87c97679efe8a052efeb02b0925a62920eb
+size 5571507

agnews/train.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:0f8c691e32d36ba1aad6257e67272040f3d83fdac272d7ac4ac7a9dfd3050d7c
+size 44651077

agnews/valid.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:a58afcd585ce5f5be160a9c4d43fb5873f2ffaaaae932a5901674b5297e342a9
+size 5534430

bank-marketing/label.json
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:3076495f06f7531a7c9335d92d53d85e262656bbf2087e2408189f4e28b05852
-size 24

bank-marketing/rules.json
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:bc7b243ae6ecb6142afc28dfdee3b599ccffeb5ad930f092e16968c8c9c14385
-size 11930

bank-marketing/test.json
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:4f43dd1db6c0c9a34b27ca436265ca958f2495cc54c82a243c0c8c5e593545e0
-size 1030431

bank-marketing/train.json
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ebcbb66a468bed1f8c62a699933755f6584aae865601af121ec7f6111fdd0987
-size 8276030

bank-marketing/valid.json
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:791340aaa6fb44680990cea55bebb503fec2ce7018d170995027ee0dd1142022
-size 1030349
basketball/readme.txt
CHANGED
@@ -1,18 +1,18 @@ (old and new content are identical; shown once)

Basketball - A Video Dataset for Activity Recognition in the Basketball Game

# Source:

D. Y. Fu, M. F. Chen, F. Sala, S. M. Hooper, K. Fatahalian, and C. Ré. Fast and three-rious: Speeding up weak supervision with triplet methods. In ICML, pages 3280–3291, 2020.


# Labels:

0: negative (the sport being played is not basketball)

1: positive (the sport being played is basketball)


4 Labeling functions

LFs: these sources rely on an off-the-shelf object detector to detect balls or people, and use heuristics based on the average pixel of the detected ball or distance between the ball and person to determine whether the sport being played is basketball or not.
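The repository describes these detector-based heuristics only in prose. As a rough illustration, a labeling function over detector output might look like the sketch below; the record schema, field names, and the 0.07 distance threshold are hypothetical, not the paper's actual sources.

ABSTAIN, NOT_BASKETBALL, BASKETBALL = -1, 0, 1

def lf_ball_person_distance(frame):
    # `frame` is assumed to carry object-detector output: normalized box centers
    # for the detected ball and the nearest person. Schema and threshold are
    # illustrative assumptions.
    if frame.get("ball") is None or frame.get("person") is None:
        return ABSTAIN
    bx, by = frame["ball"]
    px, py = frame["person"]
    dist = ((bx - px) ** 2 + (by - py) ** 2) ** 0.5
    return BASKETBALL if dist < 0.07 else NOT_BASKETBALL

print(lf_ball_person_distance({"ball": (0.50, 0.40), "person": (0.52, 0.43)}))  # 1
print(lf_ball_person_distance({"ball": None, "person": (0.10, 0.20)}))          # -1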
basketball/test.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:b1a460aa8a3de9bfc7e36986c31546c4bcd6fa341716d3e1ae98e4ff820acb34
+size 52410041

basketball/train.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:4d3fdf3186fee1430ce7ccf9f3020e7e7a51242ade6f97b9b9c27056b7d9af45
+size 771247408

basketball/valid.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:87df5c70192b6ab0c28e9a97c283a20cf8483aefaf9a2b8b237e4abb572c8dff
+size 45660469

bc5cdr/test.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:ba73806d670943235fabddb8551854559ce429f5b00d0dea48d9a939b753daf6
+size 18436624

bc5cdr/train.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:a1de64778f41b861f01568325f7aff66c4d18155bf112d46b3fd4103bd6730d1
+size 17677316

bc5cdr/valid.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:4c3b537ca3fb51c52380304743381dc3219fa77c5d7a8886a5c9a637218903a9
+size 17537198
cdr/readme.txt
CHANGED
@@ -1,286 +1,286 @@ (old and new content are identical; shown once)

CDR - Extracting Chemical-Disease Relations from Academic Literature

# Source:
https://github.com/snorkel-team/snorkel-extraction/tree/master/tutorials/cdr

# Labels:

0: Negative, the drug does NOT induce the disease
1: Positive, the drug induces the disease


33 labeling functions (use Ctrl+F to search for an implementation)
LFs = [
    LF_c_cause_d,
    LF_c_d,
    LF_c_induced_d,
    LF_c_treat_d,
    LF_c_treat_d_wide,
    LF_closer_chem,
    LF_closer_dis,
    LF_ctd_marker_c_d,
    LF_ctd_marker_induce,
    LF_ctd_therapy_treat,
    LF_ctd_unspecified_treat,
    LF_ctd_unspecified_induce,
    LF_d_following_c,
    LF_d_induced_by_c,
    LF_d_induced_by_c_tight,
    LF_d_treat_c,
    LF_develop_d_following_c,
    LF_far_c_d,
    LF_far_d_c,
    LF_improve_before_disease,
    LF_in_ctd_therapy,
    LF_in_ctd_marker,
    LF_in_patient_with,
    LF_induce,
    LF_induce_name,
    LF_induced_other,
    LF_level,
    LF_measure,
    LF_neg_d,
    LF_risk_d,
    LF_treat_d,
    LF_uncertain,
    LF_weak_assertions,
]


##### Distant supervision approaches
# We'll use the [Comparative Toxicogenomics Database](http://ctdbase.org/) (CTD) for distant supervision.
# The CTD lists chemical-condition entity pairs under three categories: therapy, marker, and unspecified.
# Therapy means the chemical treats the condition, marker means the chemical is typically present with the condition,
# and unspecified is...unspecified. We can write LFs based on these categories.

### LF_in_ctd_unspecified
def LF_in_ctd_unspecified(c):
    return -1 * cand_in_ctd_unspecified(c)

### LF_in_ctd_therapy
def LF_in_ctd_therapy(c):
    return -1 * cand_in_ctd_therapy(c)

### LF_in_ctd_marker
def LF_in_ctd_marker(c):
    return cand_in_ctd_marker(c)


##### Text pattern approaches
# Now we'll use some LF helpers to create LFs based on indicative text patterns.
# We came up with these rules by using the viewer to examine training candidates and noting frequent patterns.

import re
from snorkel.lf_helpers import (
    get_tagged_text,
    rule_regex_search_tagged_text,
    rule_regex_search_btw_AB,
    rule_regex_search_btw_BA,
    rule_regex_search_before_A,
    rule_regex_search_before_B,
)

# List to parenthetical
def ltp(x):
    return '(' + '|'.join(x) + ')'

### LF_induce
def LF_induce(c):
    return 1 if re.search(r'{{A}}.{0,20}induc.{0,20}{{B}}', get_tagged_text(c), flags=re.I) else 0

### LF_d_induced_by_c
causal_past = ['induced', 'caused', 'due']
def LF_d_induced_by_c(c):
    return rule_regex_search_btw_BA(c, '.{0,50}' + ltp(causal_past) + '.{0,9}(by|to).{0,50}', 1)

### LF_d_induced_by_c_tight
def LF_d_induced_by_c_tight(c):
    return rule_regex_search_btw_BA(c, '.{0,50}' + ltp(causal_past) + ' (by|to) ', 1)

### LF_induce_name
def LF_induce_name(c):
    return 1 if 'induc' in c.chemical.get_span().lower() else 0

### LF_c_cause_d
causal = ['cause[sd]?', 'induce[sd]?', 'associated with']
def LF_c_cause_d(c):
    return 1 if (
        re.search(r'{{A}}.{0,50} ' + ltp(causal) + '.{0,50}{{B}}', get_tagged_text(c), re.I)
        and not re.search('{{A}}.{0,50}(not|no).{0,20}' + ltp(causal) + '.{0,50}{{B}}', get_tagged_text(c), re.I)
    ) else 0

### LF_d_treat_c
treat = ['treat', 'effective', 'prevent', 'resistant', 'slow', 'promise', 'therap']
def LF_d_treat_c(c):
    return rule_regex_search_btw_BA(c, '.{0,50}' + ltp(treat) + '.{0,50}', -1)

### LF_c_treat_d
def LF_c_treat_d(c):
    return rule_regex_search_btw_AB(c, '.{0,50}' + ltp(treat) + '.{0,50}', -1)

### LF_treat_d
def LF_treat_d(c):
    return rule_regex_search_before_B(c, ltp(treat) + '.{0,50}', -1)

### LF_c_treat_d_wide
def LF_c_treat_d_wide(c):
    return rule_regex_search_btw_AB(c, '.{0,200}' + ltp(treat) + '.{0,200}', -1)

### LF_c_d
def LF_c_d(c):
    return 1 if ('{{A}} {{B}}' in get_tagged_text(c)) else 0

### LF_c_induced_d
def LF_c_induced_d(c):
    return 1 if (
        ('{{A}} {{B}}' in get_tagged_text(c)) and
        (('-induc' in c[0].get_span().lower()) or ('-assoc' in c[0].get_span().lower()))
    ) else 0

### LF_improve_before_disease
def LF_improve_before_disease(c):
    return rule_regex_search_before_B(c, 'improv.*', -1)

### LF_in_patient_with
pat_terms = ['in a patient with ', 'in patients with']
def LF_in_patient_with(c):
    return -1 if re.search(ltp(pat_terms) + '{{B}}', get_tagged_text(c), flags=re.I) else 0

### LF_uncertain
uncertain = ['combin', 'possible', 'unlikely']
def LF_uncertain(c):
    return rule_regex_search_before_A(c, ltp(uncertain) + '.*', -1)

### LF_induced_other
def LF_induced_other(c):
    return rule_regex_search_tagged_text(c, '{{A}}.{20,1000}-induced {{B}}', -1)

### LF_far_c_d
def LF_far_c_d(c):
    return rule_regex_search_btw_AB(c, '.{100,5000}', -1)

### LF_far_d_c
def LF_far_d_c(c):
    return rule_regex_search_btw_BA(c, '.{100,5000}', -1)

### LF_risk_d
def LF_risk_d(c):
    return rule_regex_search_before_B(c, 'risk of ', 1)

### LF_develop_d_following_c
def LF_develop_d_following_c(c):
    return 1 if re.search(r'develop.{0,25}{{B}}.{0,25}following.{0,25}{{A}}', get_tagged_text(c), flags=re.I) else 0

### LF_d_following_c
procedure, following = ['inject', 'administrat'], ['following']
def LF_d_following_c(c):
    return 1 if re.search('{{B}}.{0,50}' + ltp(following) + '.{0,20}{{A}}.{0,50}' + ltp(procedure), get_tagged_text(c), flags=re.I) else 0

### LF_measure
def LF_measure(c):
    return -1 if re.search('measur.{0,75}{{A}}', get_tagged_text(c), flags=re.I) else 0

### LF_level
def LF_level(c):
    return -1 if re.search('{{A}}.{0,25} level', get_tagged_text(c), flags=re.I) else 0

### LF_neg_d
def LF_neg_d(c):
    return -1 if re.search('(none|not|no) .{0,25}{{B}}', get_tagged_text(c), flags=re.I) else 0

### LF_weak_assertions
WEAK_PHRASES = ['none', 'although', 'was carried out', 'was conducted',
                'seems', 'suggests', 'risk', 'implicated',
                'the aim', 'to (investigate|assess|study)']

WEAK_RGX = r'|'.join(WEAK_PHRASES)
def LF_weak_assertions(c):
    return -1 if re.search(WEAK_RGX, get_tagged_text(c), flags=re.I) else 0


##### Composite LFs

# The following LFs take some of the strongest distant supervision and text pattern LFs,
# and combine them to form more specific LFs. These LFs introduce some obvious
# dependencies within the LF set, which we will model later.

### LF_ctd_marker_c_d
def LF_ctd_marker_c_d(c):
    return LF_c_d(c) * cand_in_ctd_marker(c)

### LF_ctd_marker_induce
def LF_ctd_marker_induce(c):
    return (LF_c_induced_d(c) or LF_d_induced_by_c_tight(c)) * cand_in_ctd_marker(c)

### LF_ctd_therapy_treat
def LF_ctd_therapy_treat(c):
    return LF_c_treat_d_wide(c) * cand_in_ctd_therapy(c)

### LF_ctd_unspecified_treat
def LF_ctd_unspecified_treat(c):
    return LF_c_treat_d_wide(c) * cand_in_ctd_unspecified(c)

### LF_ctd_unspecified_induce
def LF_ctd_unspecified_induce(c):
    return (LF_c_induced_d(c) or LF_d_induced_by_c_tight(c)) * cand_in_ctd_unspecified(c)


##### Rules based on context hierarchy
# These last two rules will make use of the context hierarchy.
# The first checks if there is a chemical mention much closer to the candidate's disease mention
# than the candidate's chemical mention. The second does the analog for diseases.

### LF_closer_chem
def LF_closer_chem(c):
    # Get distance between chemical and disease
    chem_start, chem_end = c.chemical.get_word_start(), c.chemical.get_word_end()
    dis_start, dis_end = c.disease.get_word_start(), c.disease.get_word_end()
    if dis_start < chem_start:
        dist = chem_start - dis_end
    else:
        dist = dis_start - chem_end
    # Try to find a chemical closer than @dist/2 in either direction
    sent = c.get_parent()
    for i in range(dis_end, min(len(sent.words), dis_end + dist // 2)):
        et, cid = sent.entity_types[i], sent.entity_cids[i]
        if et == 'Chemical' and cid != sent.entity_cids[chem_start]:
            return -1
    for i in range(max(0, dis_start - dist // 2), dis_start):
        et, cid = sent.entity_types[i], sent.entity_cids[i]
        if et == 'Chemical' and cid != sent.entity_cids[chem_start]:
            return -1
    return 0

### LF_closer_dis
def LF_closer_dis(c):
    # Get distance between chemical and disease
    chem_start, chem_end = c.chemical.get_word_start(), c.chemical.get_word_end()
    dis_start, dis_end = c.disease.get_word_start(), c.disease.get_word_end()
    if dis_start < chem_start:
        dist = chem_start - dis_end
    else:
        dist = dis_start - chem_end
    # Try to find a disease closer than @dist/8 in either direction
    sent = c.get_parent()
    for i in range(chem_end, min(len(sent.words), chem_end + dist // 8)):
        et, cid = sent.entity_types[i], sent.entity_cids[i]
        if et == 'Disease' and cid != sent.entity_cids[dis_start]:
            return -1
    for i in range(max(0, chem_start - dist // 8), chem_start):
        et, cid = sent.entity_types[i], sent.entity_cids[i]
        if et == 'Disease' and cid != sent.entity_cids[dis_start]:
            return -1
    return 0
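As a small self-contained demo of how the {{A}}/{{B}} placeholders in the patterns above behave once the candidate's chemical and disease spans are tagged into the sentence (get_tagged_text in the tutorial produces strings of this shape), consider the sketch below; the tagged sentences are invented examples, not data from the dataset.

import re

def ltp(x):
    return '(' + '|'.join(x) + ')'

causal = ['cause[sd]?', 'induce[sd]?', 'associated with']
pattern = r'{{A}}.{0,50} ' + ltp(causal) + '.{0,50}{{B}}'

tagged_1 = "{{A}} therapy was followed by severe {{B}} in two patients"
tagged_2 = "{{A}} was associated with acute {{B}}"

print(bool(re.search(pattern, tagged_1, re.I)))  # False: no causal cue between A and B, so LF_c_cause_d returns 0
print(bool(re.search(pattern, tagged_2, re.I)))  # True: LF_c_cause_d would return 1 (positive)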
cdr/test.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:4737198366306565a7ae23ba29b20b3e457021a686c57232306b04a10a0ebfe8
+size 3642958

cdr/train.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:aef685319701179e17c148ac729bc0cbdace9c4355a784384b5c4da1f395347e
+size 6672792

cdr/valid.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:14f2ab3e62df6515d20ee72941c6be26af0aa51e1324c9908e86808b26041a25
+size 720463

census/labeled_ids.json
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:15e139f67ba6082723814578d4c5a1b7d14fbce2ebe198f53cc8423da7789cde
-size 1538

census/test.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:a47dd6436974a1277e443cc4fcef641ff8fd6052dfe19ba7df3b80ba58b14d12
+size 60700301

census/train.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:e0a17137af9d912090886b5b4427cd0eb5a58c49cde6cfea748d86d29ff24c7b
+size 37587925

census/valid.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:79a58b9023e30fd0ea23cf215b8546d93ee188373ba1768741c9f93e884ae76f
+size 20729303
chemprot/readme.txt
CHANGED
@@ -1,213 +1,213 @@ (old and new content are identical; shown once)

Chemprot Relation Classification Dataset
https://github.com/yueyu1030/COSINE/tree/main/data/chemprot


# Labels

"0": "Part of",
"1": "Regulator",
"2": "Upregulator",
"3": "Downregulator",
"4": "Agonist",
"5": "Antagonist",
"6": "Modulator",
"7": "Cofactor",
"8": "Substrate/Product",
"9": "NOT"


# Labeling Functions

## Part of
@labeling_function()
def lf_amino_acid(x):
    return 1 if 'amino acid' in x.sentence.lower() else ABSTAIN

@labeling_function()
def lf_replace(x):
    return 1 if 'replace' in x.sentence.lower() else ABSTAIN

@labeling_function()
def lf_mutant(x):
    return 1 if 'mutant' in x.sentence.lower() or 'mutat' in x.sentence.lower() else ABSTAIN


## Regulator
@labeling_function()
def lf_bind(x):
    return 2 if 'bind' in x.sentence.lower() else ABSTAIN

@labeling_function()
def lf_interact(x):
    return 2 if 'interact' in x.sentence.lower() else ABSTAIN

@labeling_function()
def lf_affinity(x):
    return 2 if 'affinit' in x.sentence.lower() else ABSTAIN


## Upregulator
# Activator
@labeling_function()
def lf_activate(x):
    return 3 if 'activat' in x.sentence.lower() else ABSTAIN

@labeling_function()
def lf_increase(x):
    return 3 if 'increas' in x.sentence.lower() else ABSTAIN

@labeling_function()
def lf_induce(x):
    return 3 if 'induc' in x.sentence.lower() else ABSTAIN

@labeling_function()
def lf_stimulate(x):
    return 3 if 'stimulat' in x.sentence.lower() else ABSTAIN

@labeling_function()
def lf_upregulate(x):
    return 3 if 'upregulat' in x.sentence.lower() else ABSTAIN


## Downregulator
@labeling_function()
def lf_downregulate(x):
    return 4 if 'downregulat' in x.sentence.lower() or 'down-regulat' in x.sentence.lower() else ABSTAIN

@labeling_function()
def lf_reduce(x):
    return 4 if 'reduc' in x.sentence.lower() else ABSTAIN

@labeling_function()
def lf_inhibit(x):
    return 4 if 'inhibit' in x.sentence.lower() else ABSTAIN

@labeling_function()
def lf_decrease(x):
    return 4 if 'decreas' in x.sentence.lower() else ABSTAIN


## Agonist
@labeling_function()
def lf_agonist(x):
    return 5 if ' agoni' in x.sentence.lower() or "\tagoni" in x.sentence.lower() else ABSTAIN


## Antagonist
@labeling_function()
def lf_antagonist(x):
    return 6 if 'antagon' in x.sentence.lower() else ABSTAIN


## Modulator
@labeling_function()
def lf_modulate(x):
    return 7 if 'modulat' in x.sentence.lower() else ABSTAIN

@labeling_function()
def lf_allosteric(x):
    return 7 if 'allosteric' in x.sentence.lower() else ABSTAIN


## Cofactor
@labeling_function()
def lf_cofactor(x):
    return 8 if 'cofactor' in x.sentence.lower() else ABSTAIN


## Substrate/Product
@labeling_function()
def lf_substrate(x):
    return 9 if 'substrate' in x.sentence.lower() else ABSTAIN

@labeling_function()
def lf_transport(x):
    return 9 if 'transport' in x.sentence.lower() else ABSTAIN

@labeling_function()
def lf_catalyze(x):
    return 9 if 'catalyz' in x.sentence.lower() or 'catalys' in x.sentence.lower() else ABSTAIN

@labeling_function()
def lf_product(x):
    return 9 if "produc" in x.sentence.lower() else ABSTAIN

@labeling_function()
def lf_convert(x):
    return 9 if "conver" in x.sentence.lower() else ABSTAIN


## NOT
@labeling_function()
def lf_not(x):
    return 10 if 'not' in x.sentence.lower() else ABSTAIN
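A hedged usage sketch showing how LFs in this style can be applied, assuming the Snorkel 0.9 labeling API and a DataFrame with a "sentence" column (the column name follows the x.sentence accesses above); the example sentences are invented.

import pandas as pd
from snorkel.labeling import labeling_function, PandasLFApplier

ABSTAIN = -1

# Two of the LFs above, reproduced so the sketch is self-contained.
@labeling_function()
def lf_agonist(x):
    return 5 if ' agoni' in x.sentence.lower() or "\tagoni" in x.sentence.lower() else ABSTAIN

@labeling_function()
def lf_antagonist(x):
    return 6 if 'antagon' in x.sentence.lower() else ABSTAIN

df = pd.DataFrame({"sentence": [
    "Compound X acts as a selective antagonist of the receptor.",
    "The enzyme catalyzes the conversion of A to B.",
]})
applier = PandasLFApplier(lfs=[lf_agonist, lf_antagonist])
L = applier.apply(df=df)
print(L)  # rows are examples, columns are LF votes; -1 means abstain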
chemprot/test.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:946949dffb01347c1b7e455488239f533d583afb5c0cfbc161391d74bc2783ea
+size 1573325

chemprot/train.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:44bde7cae6ab2e167cb3b97b1c120f758384cf53a8d96fb1b1d70a4fb634a98e
+size 12594569

chemprot/valid.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:4843813b4fdd34bc71fea9e8fa6c7180ecf2bdd21c43ffb0e9c6f3d13b9369cf
+size 1575355
commercial/readme.txt
CHANGED
@@ -1,22 +1,22 @@ (old and new content are identical; shown once)

Commercial

# Source:

D. Y. Fu, M. F. Chen, F. Sala, S. M. Hooper, K. Fatahalian, and C. Ré. Fast and three-rious: Speeding up weak supervision with triplet methods. In ICML, pages 3280–3291, 2020.


# Labels:
0: negative (the segment is not a commercial)

1: positive (the segment is a commercial)


4 Labeling functions

LFs: In this dataset, there is a strong signal for the presence or absence of commercials in pixel histograms and the text; in particular, commercials are book-ended on either side by sequences of black frames, and commercial segments tend to have mixed-case or missing transcripts (whereas news segments are in all caps). We use these signals to build the weak supervision sources.
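As a rough illustration of the transcript-case signal described above, a labeling function could look like the sketch below; the record schema and the label constants are assumptions for the example, not the paper's actual sources.

ABSTAIN, NEWS, COMMERCIAL = -1, 0, 1

def lf_transcript_case(segment):
    # News transcripts in this corpus are described as all caps; mixed-case or
    # missing transcripts suggest a commercial. The dict schema is hypothetical.
    text = segment.get("transcript", "")
    if not text:
        return COMMERCIAL
    return COMMERCIAL if text != text.upper() else NEWS

print(lf_transcript_case({"transcript": "BREAKING NEWS FROM THE CAPITOL"}))  # 0 (news)
print(lf_transcript_case({"transcript": "Call now and save 20% today!"}))    # 1 (commercial)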
commercial/test.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:b77b296f9d2a067afdd837ba0060aedc2f82ee5b7d083cc26d571ff4d70f0b88
+size 322526234

commercial/train.json
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:27d25bb8c139e2ea14e619ca72c647d62dadb3645ab0e40325db4e1653827a25
-size 2761720065

commercial/valid.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:b63485b820d5942036e3403dc0cda177dc484bd2307c205b23b2e88ac7ecf077
+size 407895122
conll/test.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:558fd0aff43c88df1d9b7cb77ca43b8ea48387c6a8256c07f91850fcbefac516
+size 12269564

conll/train.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:d21ff7aba766a8ef9dbf342e99de47086cca25b2b64bc698010324e90ef91b3d
+size 53648877

conll/valid.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:6eea27ff5576fe552f38eeda3cc158fcb16308bf993894d5c9513510d33ed350
+size 13500610
imdb/readme.txt
CHANGED
@@ -1,60 +1,60 @@ (old and new content are identical; shown once)

IMDB Sentiment Classification

https://github.com/weakrules/Denoise-multi-weak-sources/tree/master/rules-noisy-labels/IMDB

# Labels

"0": "Negative",
"1": "Positive"


# Labeling functions

lfs = [
    expression_nexttime,
    keyword_compare,
    keyword_general,
    keyword_finish,
    keyword_plot
]


# lf - expression_nexttime

expression_nexttime = make_expression_lf(name="expression_nexttime",
                                         pre_pos=["will ", " ll ", "would ", " d ", "can t wait to "],
                                         expression=[" next time", " again", " rewatch", " anymore", " rewind"])


# lf - keyword_compare

keyword_compare = make_keyword_lf(name="keyword_compare",
                                  keywords_pos=[],
                                  keywords_neg=[" than this", " than the film", " than the movie"])


# lf - keyword_general

keyword_general = make_keyword_lf(name="keyword_general",
                                  keywords_pos=["masterpiece", "outstanding", "perfect", "great", "good", "nice", "best", "excellent", "worthy", "awesome", "enjoy", "positive", "pleasant", "wonderful", "amazing", "superb", "fantastic", "marvellous", "fabulous"],
                                  keywords_neg=["bad", "worst", "horrible", "awful", "terrible", "crap", "shit", "garbage", "rubbish", "waste"])


# lf - keyword_finish

keyword_finish = make_keyword_lf(name="keyword_finish",
                                 keywords_pos=[],
                                 keywords_neg=["fast forward", "n t finish"])


# lf - keyword_plot

keyword_plot = make_keyword_lf(name="keyword_plot",
                               keywords_pos=["well written", "absorbing", "attractive", "innovative", "instructive", "interesting", "touching", "moving"],
                               keywords_neg=["to sleep", "fell asleep", "boring", "dull", "plain"])
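make_keyword_lf and make_expression_lf are defined in the linked repository, not reproduced here. The sketch below is a hypothetical minimal version of the keyword factory, assuming labels 1 = Positive, 0 = Negative, and -1 = abstain.

ABSTAIN, NEG, POS = -1, 0, 1

def make_keyword_lf(name, keywords_pos, keywords_neg):
    # Positive keywords are checked first, then negative keywords; otherwise abstain.
    def lf(text):
        lowered = text.lower()
        if any(k in lowered for k in keywords_pos):
            return POS
        if any(k in lowered for k in keywords_neg):
            return NEG
        return ABSTAIN
    lf.__name__ = name
    return lf

keyword_finish = make_keyword_lf(name="keyword_finish",
                                 keywords_pos=[],
                                 keywords_neg=["fast forward", "n t finish"])
print(keyword_finish("I had to fast forward through most of it."))  # 0 (Negative)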
imdb/test.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:5fd75f6935d7e0a99bd2cb667e9052ee30a1283fd529155b02cd5d0364834100
+size 3319002

imdb/train.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:10c4c949f70392eedec3a2d225f2ada83b0c30ffb875a5cb975261c7e259a443
+size 26840546

imdb/valid.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:eb9cab9171b68628ed42b15f81ade895b50340ff556678dd36501c2e32cb6bc6
+size 3436156

laptopreview/test.json
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:46482c4fec5fe3b30f186dd19742941f4bbb8664c9df13c8c2d009ed1aa54175
+size 1074084