Datasets: conll2003
Tom Aarsen committed
Commit ce350f3 · Parent(s): 01ad4ad

Add 'document_id' and 'sentence_id' columns

- README.md +7 -3
- conll2003.py +12 -0
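For orientation, here is a minimal sketch, not part of this commit, of how the two new columns surface once the change is in place. The `revision` pin and the `trust_remote_code` flag are assumptions about your `datasets` version: script-backed datasets need `trust_remote_code=True` on newer releases, and script loading is dropped entirely in `datasets>=3.0`.

```python
# Minimal sketch (not from this commit): load the dataset at this revision
# and read the two new columns.
from datasets import load_dataset

ds = load_dataset(
    "conll2003",
    split="train",
    revision="ce350f3",  # pin to this commit; use the full hash if the short one does not resolve
    trust_remote_code=True,  # required for script-backed datasets on newer `datasets` releases
)

example = ds[0]
print(example["document_id"], example["sentence_id"])  # document and sentence indices
print(example["tokens"][:5])
```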
README.md
CHANGED
@@ -219,11 +219,13 @@ An example of 'train' looks as follows.
 
 ```
 {
-    "chunk_tags": [11, 12, 12, 21, 13, 11, 11, 21, 13, 11, 12, 13, 11, 21, 22, 11, 12, 17, 11, 21, 17, 11, 12, 12, 21, 22, 22, 13, 11, 0],
     "id": "0",
-    "ner_tags": [0, 3, 4, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+    "document_id": 1,
+    "sentence_id": 3,
+    "tokens": ["The", "European", "Commission", "said", "on", "Thursday", "it", "disagreed", "with", "German", "advice", "to", "consumers", "to", "shun", "British", "lamb", "until", "scientists", "determine", "whether", "mad", "cow", "disease", "can", "be", "transmitted", "to", "sheep", "."],
     "pos_tags": [12, 22, 22, 38, 15, 22, 28, 38, 15, 16, 21, 35, 24, 35, 37, 16, 21, 15, 24, 41, 15, 16, 21, 21, 20, 37, 40, 35, 21, 7],
-    "tokens": ["The", "European", "Commission", "said", "on", "Thursday", "it", "disagreed", "with", "German", "advice", "to", "consumers", "to", "shun", "British", "lamb", "until", "scientists", "determine", "whether", "mad", "cow", "disease", "can", "be", "transmitted", "to", "sheep", "."]
+    "ner_tags": [0, 3, 4, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+    "chunk_tags": [11, 12, 12, 21, 13, 11, 11, 21, 13, 11, 12, 13, 11, 21, 22, 11, 12, 17, 11, 21, 17, 11, 12, 12, 21, 22, 22, 13, 11, 0]
 }
 ```
 
@@ -236,6 +238,8 @@ The data fields are the same among all splits.
 
 #### conll2003
 - `id`: a `string` feature.
+- `document_id`: an `int32` feature tracking which document the sample is from.
+- `sentence_id`: an `int32` feature tracking which sentence in this document the sample is from.
 - `tokens`: a `list` of `string` features.
 - `pos_tags`: a `list` of classification labels (`int`). Full tagset with indices:
 
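Since the README now documents `document_id` and `sentence_id`, one natural use is regrouping the per-sentence examples back into their source documents. A sketch under the assumption that `ds` is the `train` split loaded as in the snippet above:

```python
# Illustrative sketch (not from this commit): rebuild documents by grouping
# sentences on the new `document_id` column.
from collections import defaultdict

documents = defaultdict(list)
for example in ds:
    documents[example["document_id"]].append(
        (example["sentence_id"], example["tokens"])
    )

for doc_id, sentences in sorted(documents.items()):
    # Sentences already arrive in file order; sorting on sentence_id just
    # makes the reassembly explicit.
    sentences.sort(key=lambda pair: pair[0])
    n_tokens = sum(len(tokens) for _, tokens in sentences)
    print(f"document {doc_id}: {len(sentences)} sentences, {n_tokens} tokens")
```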
conll2003.py
CHANGED
@@ -83,6 +83,8 @@ class Conll2003(datasets.GeneratorBasedBuilder):
             features=datasets.Features(
                 {
                     "id": datasets.Value("string"),
+                    "document_id": datasets.Value("int32"),
+                    "sentence_id": datasets.Value("int32"),
                     "tokens": datasets.Sequence(datasets.Value("string")),
                     "pos_tags": datasets.Sequence(
                         datasets.features.ClassLabel(
@@ -207,20 +209,28 @@ class Conll2003(datasets.GeneratorBasedBuilder):
         logger.info("⏳ Generating examples from = %s", filepath)
         with open(filepath, encoding="utf-8") as f:
             guid = 0
+            document_id = 0
+            sentence_id = 0
             tokens = []
             pos_tags = []
             chunk_tags = []
             ner_tags = []
             for line in f:
                 if line.startswith("-DOCSTART-") or line == "" or line == "\n":
+                    if line.startswith("-DOCSTART-"):
+                        document_id += 1
+                        sentence_id = 0
                     if tokens:
                         yield guid, {
                             "id": str(guid),
+                            "document_id": document_id,
+                            "sentence_id": sentence_id,
                             "tokens": tokens,
                             "pos_tags": pos_tags,
                             "chunk_tags": chunk_tags,
                             "ner_tags": ner_tags,
                         }
+                        sentence_id += 1
                         guid += 1
                         tokens = []
                         pos_tags = []
@@ -237,6 +247,8 @@ class Conll2003(datasets.GeneratorBasedBuilder):
             if tokens:
                 yield guid, {
                     "id": str(guid),
+                    "document_id": document_id,
+                    "sentence_id": sentence_id,
                     "tokens": tokens,
                     "pos_tags": pos_tags,
                     "chunk_tags": chunk_tags,
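The heart of the change is the counting logic: `document_id` is bumped on every `-DOCSTART-` marker (with `sentence_id` reset to 0), and `sentence_id` advances each time a buffered sentence is yielded, which is why the first document carries `document_id` 1 rather than 0. Below is a standalone replay of that logic on an inline CoNLL-style snippet; the `guid` bookkeeping and the POS/chunk/NER columns are omitted for brevity.

```python
# Standalone replay of the counting logic added above, on a tiny inline
# CoNLL-style snippet. `print` stands in for the generator's `yield`.
raw = (
    "-DOCSTART- -X- -X- O\n"
    "\n"
    "EU NNP B-NP B-ORG\n"
    "rejects VBZ B-VP O\n"
    "\n"
    "German JJ B-NP B-MISC\n"
    "call NN I-NP O\n"
    "\n"
    "-DOCSTART- -X- -X- O\n"
    "\n"
    "Peter NNP B-NP B-PER\n"
)

document_id = 0
sentence_id = 0
tokens = []
for line in raw.splitlines(keepends=True):
    if line.startswith("-DOCSTART-") or line == "" or line == "\n":
        if line.startswith("-DOCSTART-"):
            document_id += 1  # a new document starts
            sentence_id = 0   # sentence numbering restarts per document
        if tokens:
            print(document_id, sentence_id, tokens)
            sentence_id += 1
            tokens = []
    else:
        tokens.append(line.split(" ")[0])
if tokens:  # flush the last buffered sentence
    print(document_id, sentence_id, tokens)

# Output:
# 1 0 ['EU', 'rejects']
# 1 1 ['German', 'call']
# 2 0 ['Peter']
```

Note that `document_id` is incremented before any pending buffer is flushed; this is safe because CoNLL files place a blank line before each `-DOCSTART-`, so the buffer is already empty when the marker arrives.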