Datasets: Commit 4f1abfa
Parent(s): 4d2db87

Update data URL in pubmed dataset (#3692)

* Update data URL in pubmed dataset
* Update config name and version
* Update dummy data
* Delete metadata JSON
* Delete dummy data
* Add citation acknowledgement and clean
* Update hardcoded exception in CI test for pubmed dataset
* Revert "Delete dummy data"
This reverts commit 9d12f05c1dca14a14100075d598c20e7c3637b56.
Commit from https://github.com/huggingface/datasets/commit/5437925190ffc7ec532396e771e715e9fbd32a48
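With this change the loader exposes a single `2022` config (version 2.0.0) pointing at the new baseline URLs. A minimal usage sketch, assuming a `datasets` release that still resolves `pubmed` to this script-based loader:

```python
from datasets import load_dataset

# "2022" is the only config after this commit, so it is also the default.
# Note: this downloads the full 2022 baseline (1114 gzipped XML files).
ds = load_dataset("pubmed", "2022", split="train")
print(ds[0])
```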
- dummy/{2021/1.0.0 → 2022/2.0.0}/dummy_data.zip +2 -2
- pubmed.py +6 -17
dummy/{2021/1.0.0 → 2022/2.0.0}/dummy_data.zip
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:f434e2dda488f7c1cee83060a2500904d9773fb9bdb2d2ff2774b82fe69b2f71
+size 1960
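The dummy archive is tracked with Git LFS, so the diff above shows the pointer file rather than the zip contents; `oid` is the SHA-256 of the real file and `size` its byte count. A quick sketch for checking a rebuilt archive against the new pointer (the local path is hypothetical):

```python
import hashlib
from pathlib import Path

# Hypothetical checkout path of the renamed dummy archive.
path = Path("datasets/pubmed/dummy/2022/2.0.0/dummy_data.zip")

data = path.read_bytes()
print(hashlib.sha256(data).hexdigest())  # should match the pointer's oid (f434e2dd...)
print(len(data))                         # should match the pointer's size (1960)
```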
pubmed.py
CHANGED
@@ -12,7 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-"""
+"""MEDLINE/PubMed data."""
 
 
 import copy
@@ -24,11 +24,10 @@ import datasets
 logger = datasets.logging.get_logger(__name__)
 
 
-# Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = """\
+Courtesy of the U.S. National Library of Medicine.
 """
 
-# You can copy an official description
 _DESCRIPTION = """\
 NLM produces a baseline set of MEDLINE/PubMed citation records in XML format for download on an annual basis. The annual baseline is released in December of each year. Each day, NLM produces update files that include new, revised and deleted citations. See our documentation page for more information.
 """
@@ -37,10 +36,10 @@ _HOMEPAGE = "https://www.nlm.nih.gov/databases/download/pubmed_medline.html"
 
 _LICENSE = ""
 
-# TODO: Add link to the official dataset URLs here
 # The HuggingFace dataset library don't host the datasets but only point to the original files
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-
+# Note these URLs here are used by MockDownloadManager.create_dummy_data_list
+_URLs = [f"ftp://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed22n{i:04d}.xml.gz" for i in range(1, 1115)]
 
 
 # Copyright Ferry Boender, released under the MIT license.
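The updated `_URLs` comprehension enumerates the 2022 annual baseline files; a quick sketch of what it expands to:

```python
# Same comprehension as in the updated pubmed.py.
_URLs = [f"ftp://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed22n{i:04d}.xml.gz" for i in range(1, 1115)]

print(len(_URLs))  # 1114 baseline files
print(_URLs[0])    # ftp://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed22n0001.xml.gz
print(_URLs[-1])   # ftp://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed22n1114.xml.gz
```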
@@ -147,7 +146,7 @@ class Pubmed(datasets.GeneratorBasedBuilder):
     """Pubmed citations records"""
 
     BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="2021", description="The 2021 annual record", version=datasets.Version("1.0.0")),
+        datasets.BuilderConfig(name="2022", description="The 2022 annual record", version=datasets.Version("2.0.0")),
     ]
 
     # FILLED automatically from features
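The config rename is what drives the dummy-data move above: judging by the rename and the `MockDownloadManager` comment, CI resolves dummy data under `dummy/<config name>/<version>/`. A construction-only sketch of the new config:

```python
import datasets

config = datasets.BuilderConfig(
    name="2022",
    description="The 2022 annual record",
    version=datasets.Version("2.0.0"),
)

# Dummy data appears to be looked up under dummy/<name>/<version>/dummy_data.zip,
# hence the move from dummy/2021/1.0.0 to dummy/2022/2.0.0 in this commit.
print(config.name)  # 2022
```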
@@ -307,19 +306,10 @@ class Pubmed(datasets.GeneratorBasedBuilder):
         )
         self.fill_keys_from_features(features)
         return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
             description=_DESCRIPTION,
-
-            features=features,  # Here we define them above because they are different between the two configurations
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
+            features=features,
             homepage=_HOMEPAGE,
-            # License for the dataset if available
             license=_LICENSE,
-            # Citation for the dataset
             citation=_CITATION,
         )
 
@@ -329,7 +319,6 @@ class Pubmed(datasets.GeneratorBasedBuilder):
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
                 gen_kwargs={"filenames": dl_dir},
             ),
         ]