Column schema (name: dtype, observed value stats):

url: stringlengths (58–61)
repository_url: stringclasses (1 value)
labels_url: stringlengths (72–75)
comments_url: stringlengths (67–70)
events_url: stringlengths (65–68)
html_url: stringlengths (46–51)
id: int64 (599M–1.18B)
node_id: stringlengths (18–32)
number: int64 (1–4.03k)
title: stringlengths (1–276)
user: dict
labels: list
state: stringclasses (2 values)
locked: bool (1 class)
assignee: dict
assignees: list
milestone: dict
comments: sequence
created_at: int64 (1,587B–1,648B)
updated_at: int64 (1,587B–1,648B)
closed_at: int64 (1,587B–1,648B)
author_association: stringclasses (3 values)
active_lock_reason: null
body: stringlengths (0–228k)
reactions: dict
timeline_url: stringlengths (67–70)
performed_via_github_app: null
draft: bool (2 classes)
pull_request: dict
is_pull_request: bool (2 classes)

The rows below list one value per line, in the column order above.
https://api.github.com/repos/huggingface/datasets/issues/2720
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2720/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2720/comments
https://api.github.com/repos/huggingface/datasets/issues/2720/events
https://github.com/huggingface/datasets/pull/2720
954,024,426
MDExOlB1bGxSZXF1ZXN0Njk3OTgxNjMx
2,720
fix: 🐛 fix two typos
{ "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,627,401,017,000
1,627,411,097,000
1,627,411,096,000
CONTRIBUTOR
null
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2720/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2720/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2720", "html_url": "https://github.com/huggingface/datasets/pull/2720", "diff_url": "https://github.com/huggingface/datasets/pull/2720.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2720.patch", "merged_at": 1627411096000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2719
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2719/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2719/comments
https://api.github.com/repos/huggingface/datasets/issues/2719/events
https://github.com/huggingface/datasets/issues/2719
953,932,416
MDU6SXNzdWU5NTM5MzI0MTY=
2,719
Use ETag in streaming mode to detect resource updates
{ "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" }, { "id": 3470211881, "node_id": "LA_kwDODunzps7O1zsp", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer", "name": "dataset-viewer", "color": "E5583E", "default": false, "description": "Related to the dataset viewer on huggingface.co" } ]
open
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,627,395,429,000
1,634,895,368,000
null
CONTRIBUTOR
null
**Is your feature request related to a problem? Please describe.** I want to cache data I generate from processing a dataset I've loaded in streaming mode, but I currently have no way to know whether the remote data has been updated, so I don't know when to invalidate my cache. **Describe the solution you'd like** Take the ETag of the data files into account and expose it (directly or through a hash) as a signal I can use to invalidate my cache. **Describe alternatives you've considered** None
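A minimal sketch of the kind of check this feature would enable, assuming a plain HTTP `HEAD` request exposes the `ETag` header; the URL and the stored cache value here are hypothetical, not part of the `datasets` API:

```python
from typing import Optional

import requests


def remote_etag(url: str) -> Optional[str]:
    # HEAD avoids downloading the file; most servers expose an ETag header.
    return requests.head(url, allow_redirects=True).headers.get("ETag")


# Hypothetical cache record: the ETag seen when the cache was last built.
data_url = "https://example.com/data/train.jsonl"  # placeholder data file URL
cached_etag = 'W/"abc123"'  # stored alongside the processed data
if remote_etag(data_url) != cached_etag:
    print("Remote data changed: invalidate the processed-data cache.")
```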
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2719/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2719/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2718
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2718/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2718/comments
https://api.github.com/repos/huggingface/datasets/issues/2718/events
https://github.com/huggingface/datasets/pull/2718
953,360,663
MDExOlB1bGxSZXF1ZXN0Njk3NDE0NTQy
2,718
New documentation structure
{ "login": "stevhliu", "id": 59462357, "node_id": "MDQ6VXNlcjU5NDYyMzU3", "avatar_url": "https://avatars.githubusercontent.com/u/59462357?v=4", "gravatar_id": "", "url": "https://api.github.com/users/stevhliu", "html_url": "https://github.com/stevhliu", "followers_url": "https://api.github.com/users/stevhliu/followers", "following_url": "https://api.github.com/users/stevhliu/following{/other_user}", "gists_url": "https://api.github.com/users/stevhliu/gists{/gist_id}", "starred_url": "https://api.github.com/users/stevhliu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stevhliu/subscriptions", "organizations_url": "https://api.github.com/users/stevhliu/orgs", "repos_url": "https://api.github.com/users/stevhliu/repos", "events_url": "https://api.github.com/users/stevhliu/events{/privacy}", "received_events_url": "https://api.github.com/users/stevhliu/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "I just did some minor changes + added some content in these sections: share, about arrow, about cache\r\n\r\nFeel free to mark this PR as ready for review ! :)", "I just separated the `Share` How-to page into three pages: share, dataset_script and dataset_card.\r\n\r\nThis way in the share page we can explain in more details how to share a community or a canonical dataset - focus in their differences and the steps to upload them.\r\n\r\nAlso given that making a dataset script or a dataset card both require several steps, I feel like it's better to have dedicated pages for them.\r\n\r\nLet me know what you think @stevhliu and others. We can still revert this change if you feel like it was better with everything in the same place.", "I just added some minor changes to match the style, fix typos, etc. Great work on the conceptual guides, I learned a lot from them and I'm sure they will help a lot of other people too!\r\n\r\nI am fine with splitting `Share` into three separate pages. I think this probably makes it easier for users to navigate, instead of having to scroll up and down on a really long single page.", "Thanks a lot for all the suggestions ! I'm doing the final changes based on the remaining comments, then we can merge and release v1.12 of `datasets` and the new documentation ^^", "Alright I think I took all the suggestions and comments into account :)\r\nThanks everyone for the help !" ]
1,627,341,313,000
1,631,553,653,000
1,631,553,652,000
MEMBER
null
Organize Datasets documentation into four documentation types to improve clarity and discoverability of content. **Content to add in the very short term (feel free to add anything I'm missing):** - A discussion of why Datasets uses Arrow, with some context and background. It would also be great to talk about Datasets speed and performance here, and if you can share any benchmarking/tests you did, that would be awesome! Finally, a discussion about how memory-mapping frees the user from RAM constraints would be very helpful. - Explain why you would want to disable or override verifications when loading a dataset. - If possible, include a code sample for when the number of elements in one field of an output dictionary isn't the same as in the other fields (taken from this [note](https://huggingface.co/docs/datasets/processing.html#augmenting-the-dataset)).
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2718/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2718/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2718", "html_url": "https://github.com/huggingface/datasets/pull/2718", "diff_url": "https://github.com/huggingface/datasets/pull/2718.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2718.patch", "merged_at": 1631553652000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2717
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2717/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2717/comments
https://api.github.com/repos/huggingface/datasets/issues/2717/events
https://github.com/huggingface/datasets/pull/2717
952,979,976
MDExOlB1bGxSZXF1ZXN0Njk3MDkzNDEx
2,717
Fix shuffle on IterableDataset that disables batching in case any functions were mapped
{ "login": "amankhandelia", "id": 7098967, "node_id": "MDQ6VXNlcjcwOTg5Njc=", "avatar_url": "https://avatars.githubusercontent.com/u/7098967?v=4", "gravatar_id": "", "url": "https://api.github.com/users/amankhandelia", "html_url": "https://github.com/amankhandelia", "followers_url": "https://api.github.com/users/amankhandelia/followers", "following_url": "https://api.github.com/users/amankhandelia/following{/other_user}", "gists_url": "https://api.github.com/users/amankhandelia/gists{/gist_id}", "starred_url": "https://api.github.com/users/amankhandelia/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/amankhandelia/subscriptions", "organizations_url": "https://api.github.com/users/amankhandelia/orgs", "repos_url": "https://api.github.com/users/amankhandelia/repos", "events_url": "https://api.github.com/users/amankhandelia/events{/privacy}", "received_events_url": "https://api.github.com/users/amankhandelia/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,627,310,542,000
1,627,322,654,000
1,627,317,006,000
CONTRIBUTOR
null
Made a very minor change to fix issue #2716: added the missing argument in the constructor call. As discussed in the bug report, the change prevents the `shuffle` method call from resetting the value of the `batched` attribute in `MappedExamplesIterable`. Fix #2716.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2717/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2717/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2717", "html_url": "https://github.com/huggingface/datasets/pull/2717", "diff_url": "https://github.com/huggingface/datasets/pull/2717.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2717.patch", "merged_at": 1627317005000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2716
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2716/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2716/comments
https://api.github.com/repos/huggingface/datasets/issues/2716/events
https://github.com/huggingface/datasets/issues/2716
952,902,778
MDU6SXNzdWU5NTI5MDI3Nzg=
2,716
Calling shuffle on IterableDataset will disable batching in case any functions were mapped
{ "login": "amankhandelia", "id": 7098967, "node_id": "MDQ6VXNlcjcwOTg5Njc=", "avatar_url": "https://avatars.githubusercontent.com/u/7098967?v=4", "gravatar_id": "", "url": "https://api.github.com/users/amankhandelia", "html_url": "https://github.com/amankhandelia", "followers_url": "https://api.github.com/users/amankhandelia/followers", "following_url": "https://api.github.com/users/amankhandelia/following{/other_user}", "gists_url": "https://api.github.com/users/amankhandelia/gists{/gist_id}", "starred_url": "https://api.github.com/users/amankhandelia/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/amankhandelia/subscriptions", "organizations_url": "https://api.github.com/users/amankhandelia/orgs", "repos_url": "https://api.github.com/users/amankhandelia/repos", "events_url": "https://api.github.com/users/amankhandelia/events{/privacy}", "received_events_url": "https://api.github.com/users/amankhandelia/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Hi :) Good catch ! Feel free to open a PR if you want to contribute, this would be very welcome ;)", "Have raised the PR [here](https://github.com/huggingface/datasets/pull/2717)", "Fixed by #2717." ]
1,627,305,899,000
1,627,322,683,000
1,627,322,683,000
CONTRIBUTOR
null
When using a dataset in streaming mode, if one applies the `shuffle` method on the dataset together with a `map` method for which `batched=True`, the batching operation will not happen; instead `batched` will be reset to `False`. I did an RCA on the datasets codebase: the problem emerges from [this line of code](https://github.com/huggingface/datasets/blob/d25a0bf94d9f9a9aa6cabdf5b450b9c327d19729/src/datasets/iterable_dataset.py#L197), which reads `self.ex_iterable.shuffle_data_sources(seed), function=self.function, batch_size=self.batch_size`. As one can see, it is missing the `batched` argument, which means the iterator falls back to the default constructor value, in this case `False`. To remedy the problem we can change this line to `self.ex_iterable.shuffle_data_sources(seed), function=self.function, batched=self.batched, batch_size=self.batch_size`
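A self-contained toy reproduction of the pitfall described above: a keyword argument that is not forwarded when an object is rebuilt silently falls back to its constructor default. Class and attribute names mirror the report, but this is a simplified stand-in, not the real `datasets` code:

```python
class MappedExamplesIterable:
    def __init__(self, ex_iterable, function, batched=False, batch_size=1):
        self.ex_iterable = ex_iterable
        self.function = function
        self.batched = batched
        self.batch_size = batch_size

    def shuffle_data_sources(self, seed):
        # Buggy version: `batched` is not forwarded, so it resets to False.
        return MappedExamplesIterable(
            self.ex_iterable, function=self.function, batch_size=self.batch_size
        )

    def shuffle_data_sources_fixed(self, seed):
        # Fixed version: forward `batched` so shuffling preserves batching.
        return MappedExamplesIterable(
            self.ex_iterable,
            function=self.function,
            batched=self.batched,
            batch_size=self.batch_size,
        )


mapped = MappedExamplesIterable([1, 2, 3], function=None, batched=True, batch_size=10)
print(mapped.shuffle_data_sources(seed=42).batched)        # False (the bug)
print(mapped.shuffle_data_sources_fixed(seed=42).batched)  # True
```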
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2716/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2716/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2715
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2715/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2715/comments
https://api.github.com/repos/huggingface/datasets/issues/2715/events
https://github.com/huggingface/datasets/pull/2715
952,845,229
MDExOlB1bGxSZXF1ZXN0Njk2OTc5MjQ1
2,715
Update PAN-X data URL in XTREME dataset
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Merging since the CI is just about missing infos in the dataset card" ]
1,627,302,077,000
1,627,306,079,000
1,627,306,079,000
MEMBER
null
Related to #2710, #2691.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2715/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2715/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2715", "html_url": "https://github.com/huggingface/datasets/pull/2715", "diff_url": "https://github.com/huggingface/datasets/pull/2715.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2715.patch", "merged_at": 1627306079000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2714
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2714/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2714/comments
https://api.github.com/repos/huggingface/datasets/issues/2714/events
https://github.com/huggingface/datasets/issues/2714
952,580,820
MDU6SXNzdWU5NTI1ODA4MjA=
2,714
add more precise information for size
{ "login": "pennyl67", "id": 1493902, "node_id": "MDQ6VXNlcjE0OTM5MDI=", "avatar_url": "https://avatars.githubusercontent.com/u/1493902?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pennyl67", "html_url": "https://github.com/pennyl67", "followers_url": "https://api.github.com/users/pennyl67/followers", "following_url": "https://api.github.com/users/pennyl67/following{/other_user}", "gists_url": "https://api.github.com/users/pennyl67/gists{/gist_id}", "starred_url": "https://api.github.com/users/pennyl67/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pennyl67/subscriptions", "organizations_url": "https://api.github.com/users/pennyl67/orgs", "repos_url": "https://api.github.com/users/pennyl67/repos", "events_url": "https://api.github.com/users/pennyl67/events{/privacy}", "received_events_url": "https://api.github.com/users/pennyl67/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "We already have this information in the dataset_infos.json files of each dataset.\r\nMaybe we can parse these files in the backend to return their content with the endpoint at huggingface.co/api/datasets\r\n\r\nFor now if you want to access this info you have to load the json for each dataset. For example:\r\n- for a dataset on github like `squad` \r\n- https://raw.githubusercontent.com/huggingface/datasets/master/datasets/squad/dataset_infos.json\r\n- for a community dataset on the hub like `lhoestq/squad`:\r\n https://huggingface.co/datasets/lhoestq/squad/resolve/main/dataset_infos.json" ]
1,627,283,463,000
1,627,290,985,000
null
NONE
null
For the import into ELG, we would like a more precise description of the size of the dataset instead of the current size categories. The size could be expressed in bytes, or any other preferred size unit. As suggested in the Slack channel, perhaps this could be computed with a regex for existing datasets.
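A sketch of how the exact byte counts can already be read from `dataset_infos.json` today, using the squad path given in the maintainer's comment above; the `size_in_bytes` and `download_size` keys are an assumption based on the published files:

```python
import json
from urllib.request import urlopen

# Path for a canonical dataset on GitHub, taken from the comment above.
URL = "https://raw.githubusercontent.com/huggingface/datasets/master/datasets/squad/dataset_infos.json"

infos = json.load(urlopen(URL))
for config_name, info in infos.items():
    # Assumed keys: per-config sizes in bytes rather than size categories.
    print(config_name, info.get("size_in_bytes"), info.get("download_size"))
```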
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2714/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2714/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2713
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2713/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2713/comments
https://api.github.com/repos/huggingface/datasets/issues/2713/events
https://github.com/huggingface/datasets/pull/2713
952,515,256
MDExOlB1bGxSZXF1ZXN0Njk2Njk3MzU0
2,713
Enumerate all ner_tags values in WNUT 17 dataset
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,627,276,936,000
1,627,291,855,000
1,627,291,855,000
MEMBER
null
This PR does: - Enumerate all `ner_tags` values in the dataset card's Data Fields section - Add all metadata tags to the dataset card Close #2709.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2713/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2713/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2713", "html_url": "https://github.com/huggingface/datasets/pull/2713", "diff_url": "https://github.com/huggingface/datasets/pull/2713.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2713.patch", "merged_at": 1627291854000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2710
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2710/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2710/comments
https://api.github.com/repos/huggingface/datasets/issues/2710/events
https://github.com/huggingface/datasets/pull/2710
951,723,326
MDExOlB1bGxSZXF1ZXN0Njk2MDYyNjAy
2,710
Update WikiANN data URL
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "We have to update the URL in the XTREME benchmark as well:\r\n\r\nhttps://github.com/huggingface/datasets/blob/0dfc639cec450ed8762a997789a2ed63e63cdcf2/datasets/xtreme/xtreme.py#L411-L411\r\n\r\n" ]
1,627,057,761,000
1,627,292,063,000
1,627,292,063,000
MEMBER
null
WikiANN data source URL is no longer accessible: 404 error from Dropbox. We have decided to host it at Hugging Face. This PR updates the data source URL, the metadata JSON file and the dataset card. Close #2691.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2710/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2710/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2710", "html_url": "https://github.com/huggingface/datasets/pull/2710", "diff_url": "https://github.com/huggingface/datasets/pull/2710.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2710.patch", "merged_at": 1627292062000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2709
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2709/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2709/comments
https://api.github.com/repos/huggingface/datasets/issues/2709/events
https://github.com/huggingface/datasets/issues/2709
951,534,757
MDU6SXNzdWU5NTE1MzQ3NTc=
2,709
Missing documentation for wnut_17 (ner_tags)
{ "login": "maxpel", "id": 31095360, "node_id": "MDQ6VXNlcjMxMDk1MzYw", "avatar_url": "https://avatars.githubusercontent.com/u/31095360?v=4", "gravatar_id": "", "url": "https://api.github.com/users/maxpel", "html_url": "https://github.com/maxpel", "followers_url": "https://api.github.com/users/maxpel/followers", "following_url": "https://api.github.com/users/maxpel/following{/other_user}", "gists_url": "https://api.github.com/users/maxpel/gists{/gist_id}", "starred_url": "https://api.github.com/users/maxpel/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/maxpel/subscriptions", "organizations_url": "https://api.github.com/users/maxpel/orgs", "repos_url": "https://api.github.com/users/maxpel/repos", "events_url": "https://api.github.com/users/maxpel/events{/privacy}", "received_events_url": "https://api.github.com/users/maxpel/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Hi @maxpel, thanks for reporting this issue.\r\n\r\nIndeed, the documentation in the dataset card is not complete. I’m opening a Pull Request to fix it.\r\n\r\nAs the paper explains, there are 6 entity types and we have ordered them alphabetically: `corporation`, `creative-work`, `group`, `location`, `person` and `product`. \r\n\r\nEach of these entity types has 2 possible IOB2 format tags: \r\n- `B-`: to indicate that the token is the beginning of an entity name, and the \r\n- `I-`: to indicate that the token is inside an entity name. \r\n\r\nAdditionally, there is the standalone IOB2 tag \r\n- `O`: that indicates that the token belongs to no named entity. \r\n\r\nIn total there are 13 possible tags, which correspond to the following integer numbers:\r\n\r\n0. `O`\r\n1. `B-corporation`\r\n2. `I-corporation`\r\n3. `B-creative-work`\r\n4. `I-creative-work`\r\n5. `B-group`\r\n6. `I-group`\r\n7. `B-location`\r\n8. `I-location`\r\n9. `B-person`\r\n10. `I-person`\r\n11. `B-product`\r\n12. `I-product`" ]
1,627,043,132,000
1,627,291,855,000
1,627,291,855,000
CONTRIBUTOR
null
On the info page of the wnut_17 dataset (https://huggingface.co/datasets/wnut_17), the model output of `ner_tags` is only documented for these 5 cases: `ner_tags: a list of classification labels, with possible values including O (0), B-corporation (1), I-corporation (2), B-creative-work (3), I-creative-work (4).` I trained a model with the data and it gives me 13 classes: ``` "id2label": { "0": 0, "1": 1, "2": 2, "3": 3, "4": 4, "5": 5, "6": 6, "7": 7, "8": 8, "9": 9, "10": 10, "11": 11, "12": 12 } "label2id": { "0": 0, "1": 1, "10": 10, "11": 11, "12": 12, "2": 2, "3": 3, "4": 4, "5": 5, "6": 6, "7": 7, "8": 8, "9": 9 } ``` The paper (https://www.aclweb.org/anthology/W17-4418.pdf) explains those 6 categories, but the ordering does not match: ``` 1. person 2. location (including GPE, facility) 3. corporation 4. product (tangible goods, or well-defined services) 5. creative-work (song, movie, book and so on) 6. group (subsuming music band, sports team, and non-corporate organisations) ``` It would be very helpful for me if somebody could clarify the model outputs and explain the "B-" and "I-" prefixes to me. Really great work with that and the other packages, I couldn't believe that training the model with that data was basically a one-liner!
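A minimal sketch that prints the full tag inventory straight from the dataset's features, assuming the standard `Sequence(ClassLabel)` layout the loader uses:

```python
from datasets import load_dataset

ds = load_dataset("wnut_17", split="train")
# ner_tags is a sequence of ClassLabel ids; .feature.names maps each id to
# its tag string, including the IOB2 "B-"/"I-" prefixes per entity type.
for idx, name in enumerate(ds.features["ner_tags"].feature.names):
    print(idx, name)
```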
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2709/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2709/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2708
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2708/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2708/comments
https://api.github.com/repos/huggingface/datasets/issues/2708/events
https://github.com/huggingface/datasets/issues/2708
951,092,660
MDU6SXNzdWU5NTEwOTI2NjA=
2,708
QASC: incomplete training set
{ "login": "danyaljj", "id": 2441454, "node_id": "MDQ6VXNlcjI0NDE0NTQ=", "avatar_url": "https://avatars.githubusercontent.com/u/2441454?v=4", "gravatar_id": "", "url": "https://api.github.com/users/danyaljj", "html_url": "https://github.com/danyaljj", "followers_url": "https://api.github.com/users/danyaljj/followers", "following_url": "https://api.github.com/users/danyaljj/following{/other_user}", "gists_url": "https://api.github.com/users/danyaljj/gists{/gist_id}", "starred_url": "https://api.github.com/users/danyaljj/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/danyaljj/subscriptions", "organizations_url": "https://api.github.com/users/danyaljj/orgs", "repos_url": "https://api.github.com/users/danyaljj/repos", "events_url": "https://api.github.com/users/danyaljj/events{/privacy}", "received_events_url": "https://api.github.com/users/danyaljj/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Hi @danyaljj, thanks for reporting.\r\n\r\nUnfortunately, I have not been able to reproduce your problem. My train split has 8134 examples:\r\n```ipython\r\nIn [10]: ds[\"train\"]\r\nOut[10]:\r\nDataset({\r\n features: ['id', 'question', 'choices', 'answerKey', 'fact1', 'fact2', 'combinedfact', 'formatted_question'],\r\n num_rows: 8134\r\n})\r\n\r\nIn [11]: ds[\"train\"].shape\r\nOut[11]: (8134, 8)\r\n```\r\nand the content of the last 5 examples is:\r\n```ipython\r\nIn [12]: for i in range(8129, 8134):\r\n ...: print(json.dumps(ds[\"train\"][i]))\r\n ...:\r\n{\"id\": \"3KAKFY4PGU1LGXM77JAK2700NGCI3X\", \"question\": \"Chitin can be used for protection by whom?\", \"choices\": {\"text\": [\"Fungi\", \"People\", \"Man\", \"Fish\", \"trees\", \"Dogs\", \"animal\", \"Birds\"], \"label\": [\"A\", \"B\",\r\n \"C\", \"D\", \"E\", \"F\", \"G\", \"H\"]}, \"answerKey\": \"D\", \"fact1\": \"scales are used for protection by scaled animals\", \"fact2\": \"Fish scales are also composed of chitin.\", \"combinedfact\": \"Chitin can be used for prote\r\nction by fish.\", \"formatted_question\": \"Chitin can be used for protection by whom? (A) Fungi (B) People (C) Man (D) Fish (E) trees (F) Dogs (G) animal (H) Birds\"}\r\n{\"id\": \"336YQZE83VDAQVZ26HW59X51JZ9M5M\", \"question\": \"Which type of animal uses plates for protection?\", \"choices\": {\"text\": [\"squids\", \"reptiles\", \"sea urchins\", \"fish\", \"amphibians\", \"Frogs\", \"mammals\", \"salm\r\non\"], \"label\": [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\"]}, \"answerKey\": \"B\", \"fact1\": \"scales are used for protection by scaled animals\", \"fact2\": \"Reptiles have scales or plates.\", \"combinedfact\": \"Reptiles use\r\n their plates for protection.\", \"formatted_question\": \"Which type of animal uses plates for protection? (A) squids (B) reptiles (C) sea urchins (D) fish (E) amphibians (F) Frogs (G) mammals (H) salmon\"}\r\n{\"id\": \"3WZ36BJEV3FGS66VGOOUYX0LN8GTBU\", \"question\": \"What are used for protection by fish?\", \"choices\": {\"text\": [\"scales\", \"fins\", \"streams.\", \"coral\", \"gills\", \"Collagen\", \"mussels\", \"whiskers\"], \"label\": [\"\r\nA\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\"]}, \"answerKey\": \"A\", \"fact1\": \"scales are used for protection by scaled animals\", \"fact2\": \"Fish are backboned aquatic animals.\", \"combinedfact\": \"scales are used for prote\r\nction by fish \", \"formatted_question\": \"What are used for protection by fish? (A) scales (B) fins (C) streams. (D) coral (E) gills (F) Collagen (G) mussels (H) whiskers\"}\r\n{\"id\": \"3Z2R0DQ0JHDKFAO2706OYIXGNA4E28\", \"question\": \"What are pangolins covered in?\", \"choices\": {\"text\": [\"tunicates\", \"Echinoids\", \"shells\", \"exoskeleton\", \"blastoids\", \"barrel-shaped\", \"protection\", \"white\"\r\n], \"label\": [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\"]}, \"answerKey\": \"G\", \"fact1\": \"scales are used for protection by scaled animals\", \"fact2\": \"Pangolins have an elongate and tapering body covered above with ov\r\nerlapping scales.\", \"combinedfact\": \"Pangolins are covered in overlapping protection.\", \"formatted_question\": \"What are pangolins covered in? 
(A) tunicates (B) Echinoids (C) shells (D) exoskeleton (E) blastoids\r\n (F) barrel-shaped (G) protection (H) white\"}\r\n{\"id\": \"3PMBY0YE272GIWPNWIF8IH5RBHVC9S\", \"question\": \"What are covered with protection?\", \"choices\": {\"text\": [\"apples\", \"trees\", \"coral\", \"clams\", \"roses\", \"wings\", \"hats\", \"fish\"], \"label\": [\"A\", \"B\", \"C\", \"D\r\n\", \"E\", \"F\", \"G\", \"H\"]}, \"answerKey\": \"H\", \"fact1\": \"scales are used for protection by scaled animals\", \"fact2\": \"Fish are covered with scales.\", \"combinedfact\": \"Fish are covered with protection\", \"formatted_q\r\nuestion\": \"What are covered with protection? (A) apples (B) trees (C) coral (D) clams (E) roses (F) wings (G) hats (H) fish\"}\r\n```\r\n\r\nCould you please load again your dataset and print its shape, like this:\r\n```python\r\nds = load_dataset(\"qasc\", split=\"train)\r\nprint(ds.shape)\r\n```\r\nand confirm which is your output?", "Hmm .... it must have been a mistake on my side. Sorry for the hassle! " ]
1,626,991,184,000
1,627,047,007,000
1,627,047,007,000
CONTRIBUTOR
null
## Describe the bug The training instances are not loaded properly. ## Steps to reproduce the bug ```python import json from datasets import load_dataset dataset = load_dataset("qasc", script_version='1.10.2') def load_instances(split): instances = dataset[split] print(f"split: {split} - size: {len(instances)}") for x in instances: print(json.dumps(x)) load_instances('test') load_instances('validation') load_instances('train') ``` ## Results For test and validation, we can see the examples in the output (which is good!): ``` split: test - size: 920 {"answerKey": "", "choices": {"label": ["A", "B", "C", "D", "E", "F", "G", "H"], "text": ["Anthax", "under water", "uterus", "wombs", "two", "moles", "live", "embryo"]}, "combinedfact": "", "fact1": "", "fact2": "", "formatted_question": "What type of birth do therian mammals have? (A) Anthax (B) under water (C) uterus (D) wombs (E) two (F) moles (G) live (H) embryo", "id": "3C44YUNSI1OBFBB8D36GODNOZN9DPA", "question": "What type of birth do therian mammals have?"} {"answerKey": "", "choices": {"label": ["A", "B", "C", "D", "E", "F", "G", "H"], "text": ["Corvidae", "arthropods", "birds", "backbones", "keratin", "Jurassic", "front paws", "Parakeets."]}, "combinedfact": "", "fact1": "", "fact2": "", "formatted_question": "By what time had mouse-sized viviparous mammals evolved? (A) Corvidae (B) arthropods (C) birds (D) backbones (E) keratin (F) Jurassic (G) front paws (H) Parakeets.", "id": "3B1NLC6UGZVERVLZFT7OUYQLD1SGPZ", "question": "By what time had mouse-sized viviparous mammals evolved?"} {"answerKey": "", "choices": {"label": ["A", "B", "C", "D", "E", "F", "G", "H"], "text": ["Reduced friction", "causes infection", "vital to a good life", "prevents water loss", "camouflage from consumers", "Protection against predators", "spur the growth of the plant", "a smooth surface"]}, "combinedfact": "", "fact1": "", "fact2": "", "formatted_question": "What does a plant's skin do? (A) Reduced friction (B) causes infection (C) vital to a good life (D) prevents water loss (E) camouflage from consumers (F) Protection against predators (G) spur the growth of the plant (H) a smooth surface", "id": "3QRYMNZ7FYGITFVSJET3PS0F4S0NT9", "question": "What does a plant's skin do?"} ... ``` However, only a few instances are loaded for the training split, which is not correct. ## Environment info - `datasets` version: '1.10.2' - Platform: macOS - Python version: 3.7 - PyArrow version: 3.0.0
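A quick sanity check mirroring the maintainer's reply above; per that reply, the train split should report 8134 rows:

```python
from datasets import load_dataset

# Expected per the comment above: (8134, 8)
ds = load_dataset("qasc", split="train")
print(ds.shape)
```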
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2708/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2708/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2707
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2707/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2707/comments
https://api.github.com/repos/huggingface/datasets/issues/2707/events
https://github.com/huggingface/datasets/issues/2707
950,812,945
MDU6SXNzdWU5NTA4MTI5NDU=
2,707
404 Not Found Error when loading LAMA dataset
{ "login": "dwil2444", "id": 26467159, "node_id": "MDQ6VXNlcjI2NDY3MTU5", "avatar_url": "https://avatars.githubusercontent.com/u/26467159?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dwil2444", "html_url": "https://github.com/dwil2444", "followers_url": "https://api.github.com/users/dwil2444/followers", "following_url": "https://api.github.com/users/dwil2444/following{/other_user}", "gists_url": "https://api.github.com/users/dwil2444/gists{/gist_id}", "starred_url": "https://api.github.com/users/dwil2444/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dwil2444/subscriptions", "organizations_url": "https://api.github.com/users/dwil2444/orgs", "repos_url": "https://api.github.com/users/dwil2444/repos", "events_url": "https://api.github.com/users/dwil2444/events{/privacy}", "received_events_url": "https://api.github.com/users/dwil2444/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Hi @dwil2444! I was able to reproduce your error when I downgraded to v1.1.2. Updating to the latest version of Datasets fixed the error for me :)", "Hi @dwil2444, thanks for reporting.\r\n\r\nCould you please confirm which `datasets` version you were using and if the problem persists after you update it to the latest version: `pip install -U datasets`?\r\n\r\nThanks @stevhliu for the hint to fix this! ;)", "@stevhliu @albertvillanova updating to the latest version of datasets did in fact fix this issue. Thanks a lot for your help!" ]
1,626,969,153,000
1,627,309,747,000
1,627,309,747,000
NONE
null
The [LAMA](https://huggingface.co/datasets/viewer/?dataset=lama) probing dataset is not available for download. Steps to reproduce: 1. `from datasets import load_dataset` 2. `dataset = load_dataset('lama', 'trex')` Results: `FileNotFoundError: Couldn't find file locally at lama/lama.py, or remotely at https://raw.githubusercontent.com/huggingface/datasets/1.1.2/datasets/lama/lama.py or https://s3.amazonaws.com/datasets.huggingface.co/datasets/datasets/lama/lama.py`
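The resolution in the comments above was simply to upgrade the client; a minimal sketch of the confirmed fix:

```python
# Per the comments above, the 404 came from an outdated client (v1.1.2).
# Upgrade first:  pip install -U datasets
from datasets import load_dataset

dataset = load_dataset("lama", "trex")  # succeeds on a current release
print(dataset)
```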
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2707/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2707/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2706
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2706/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2706/comments
https://api.github.com/repos/huggingface/datasets/issues/2706/events
https://github.com/huggingface/datasets/pull/2706
950,606,561
MDExOlB1bGxSZXF1ZXN0Njk1MTI3ODgz
2,706
Update BibTeX entry
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,956,969,000
1,626,957,780,000
1,626,957,780,000
MEMBER
null
Update BibTeX entry.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2706/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2706/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2706", "html_url": "https://github.com/huggingface/datasets/pull/2706", "diff_url": "https://github.com/huggingface/datasets/pull/2706.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2706.patch", "merged_at": 1626957780000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2705
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2705/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2705/comments
https://api.github.com/repos/huggingface/datasets/issues/2705/events
https://github.com/huggingface/datasets/issues/2705
950,488,583
MDU6SXNzdWU5NTA0ODg1ODM=
2,705
404 not found error on loading WIKIANN dataset
{ "login": "ronbutan", "id": 39296659, "node_id": "MDQ6VXNlcjM5Mjk2NjU5", "avatar_url": "https://avatars.githubusercontent.com/u/39296659?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ronbutan", "html_url": "https://github.com/ronbutan", "followers_url": "https://api.github.com/users/ronbutan/followers", "following_url": "https://api.github.com/users/ronbutan/following{/other_user}", "gists_url": "https://api.github.com/users/ronbutan/gists{/gist_id}", "starred_url": "https://api.github.com/users/ronbutan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ronbutan/subscriptions", "organizations_url": "https://api.github.com/users/ronbutan/orgs", "repos_url": "https://api.github.com/users/ronbutan/repos", "events_url": "https://api.github.com/users/ronbutan/events{/privacy}", "received_events_url": "https://api.github.com/users/ronbutan/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Hi @ronbutan, thanks for reporting.\r\n\r\nYou are right: we have recently found that the link to the original PAN-X dataset (also called WikiANN), hosted at Dropbox, is no longer working.\r\n\r\nWe have opened an issue in the GitHub repository of the original dataset (afshinrahimi/mmner#4) and we have also contacted the author by email to ask if they are planning to fix this issue. See the details here: https://github.com/huggingface/datasets/issues/2691#issuecomment-885463027\r\n\r\nI close this issue because it is the same as in #2691. Feel free to subscribe to that other issue to be informed about any updates." ]
1,626,947,750,000
1,627,027,652,000
1,627,027,652,000
NONE
null
## Describe the bug Unable to retrieve the wikiann English dataset ## Steps to reproduce the bug ```python from datasets import list_datasets, load_dataset, list_metrics, load_metric WIKIANN = load_dataset("wikiann","en") ``` ## Expected results The Colab notebook should display a successful download status ## Actual results FileNotFoundError: Couldn't find file at https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1 ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.10.1 - Platform: Linux-5.4.104+-x86_64-with-Ubuntu-18.04-bionic - Python version: 3.7.11 - PyArrow version: 3.0.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2705/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2705/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2704
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2704/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2704/comments
https://api.github.com/repos/huggingface/datasets/issues/2704/events
https://github.com/huggingface/datasets/pull/2704
950,483,980
MDExOlB1bGxSZXF1ZXN0Njk1MDIzMTEz
2,704
Fix pick default config name message
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,947,383,000
1,626,948,161,000
1,626,948,160,000
MEMBER
null
The error message telling the user which config name to load was not displayed. This is because the code considered the config kwargs to be non-empty, which is a special case for custom configs created on the fly. The regression appeared after this change: https://github.com/huggingface/datasets/pull/2659 I fixed it by making the config kwargs empty by default, even when default parameters are passed. Fix https://github.com/huggingface/datasets/issues/2703
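A minimal sketch of the guard being described (hypothetical names, not the library's actual code):

```python
# Hedged illustration of the fix: only treat config kwargs as a custom
# on-the-fly config when the user actually passed something, so an
# unspecified config name falls through to the helpful ValueError.
def pick_config(name, builder_configs, config_kwargs):
    if name is None and not config_kwargs and len(builder_configs) > 1:
        available = [c.name for c in builder_configs]
        raise ValueError(
            "Config name is missing. "
            f"Please pick one among the available configs: {available}"
        )
    ...
```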
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2704/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2704/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2704", "html_url": "https://github.com/huggingface/datasets/pull/2704", "diff_url": "https://github.com/huggingface/datasets/pull/2704.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2704.patch", "merged_at": 1626948160000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2703
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2703/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2703/comments
https://api.github.com/repos/huggingface/datasets/issues/2703/events
https://github.com/huggingface/datasets/issues/2703
950,482,284
MDU6SXNzdWU5NTA0ODIyODQ=
2,703
Bad message when config name is missing
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false } ]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,947,243,000
1,626,948,160,000
1,626,948,160,000
MEMBER
null
When loading a dataset that has several configurations, we expect to see an error message if the user doesn't specify a config name. However, in `datasets` 1.10.0 and 1.10.1 it doesn't show the right message: ```python import datasets datasets.load_dataset("glue") ``` raises ```python AttributeError: 'BuilderConfig' object has no attribute 'text_features' ``` instead of ```python ValueError: Config name is missing. Please pick one among the available configs: ['cola', 'sst2', 'mrpc', 'qqp', 'stsb', 'mnli', 'mnli_mismatched', 'mnli_matched', 'qnli', 'rte', 'wnli', 'ax'] Example of usage: `load_dataset('glue', 'cola')` ```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2703/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2703/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2702
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2702/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2702/comments
https://api.github.com/repos/huggingface/datasets/issues/2702/events
https://github.com/huggingface/datasets/pull/2702
950,448,159
MDExOlB1bGxSZXF1ZXN0Njk0OTkyOTc1
2,702
Update BibTeX entry
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,944,679,000
1,626,945,459,000
1,626,945,458,000
MEMBER
null
Update BibTeX entry.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2702/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2702/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2702", "html_url": "https://github.com/huggingface/datasets/pull/2702", "diff_url": "https://github.com/huggingface/datasets/pull/2702.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2702.patch", "merged_at": 1626945458000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2701
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2701/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2701/comments
https://api.github.com/repos/huggingface/datasets/issues/2701/events
https://github.com/huggingface/datasets/pull/2701
950,422,403
MDExOlB1bGxSZXF1ZXN0Njk0OTcxMzM3
2,701
Fix download_mode docstrings
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892861, "node_id": "MDU6TGFiZWwxOTM1ODkyODYx", "url": "https://api.github.com/repos/huggingface/datasets/labels/documentation", "name": "documentation", "color": "0075ca", "default": true, "description": "Improvements or additions to documentation" } ]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,942,625,000
1,626,946,411,000
1,626,946,411,000
MEMBER
null
Fix `download_mode` docstrings.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2701/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2701/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2701", "html_url": "https://github.com/huggingface/datasets/pull/2701", "diff_url": "https://github.com/huggingface/datasets/pull/2701.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2701.patch", "merged_at": 1626946411000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2700
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2700/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2700/comments
https://api.github.com/repos/huggingface/datasets/issues/2700/events
https://github.com/huggingface/datasets/issues/2700
950,276,325
MDU6SXNzdWU5NTAyNzYzMjU=
2,700
from datasets import Dataset is failing
{ "login": "kswamy15", "id": 5582286, "node_id": "MDQ6VXNlcjU1ODIyODY=", "avatar_url": "https://avatars.githubusercontent.com/u/5582286?v=4", "gravatar_id": "", "url": "https://api.github.com/users/kswamy15", "html_url": "https://github.com/kswamy15", "followers_url": "https://api.github.com/users/kswamy15/followers", "following_url": "https://api.github.com/users/kswamy15/following{/other_user}", "gists_url": "https://api.github.com/users/kswamy15/gists{/gist_id}", "starred_url": "https://api.github.com/users/kswamy15/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kswamy15/subscriptions", "organizations_url": "https://api.github.com/users/kswamy15/orgs", "repos_url": "https://api.github.com/users/kswamy15/repos", "events_url": "https://api.github.com/users/kswamy15/events{/privacy}", "received_events_url": "https://api.github.com/users/kswamy15/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Hi @kswamy15, thanks for reporting.\r\n\r\nWe are fixing this critical issue and making an urgent patch release of the `datasets` library today.\r\n\r\nIn the meantime, you can circumvent this issue by updating the `tqdm` library: `!pip install -U tqdm`" ]
1,626,925,883,000
1,626,938,625,000
1,626,937,747,000
NONE
null
## Describe the bug A clear and concise description of what the bug is. ## Steps to reproduce the bug ```python # Sample code to reproduce the bug from datasets import Dataset ``` ## Expected results A clear and concise description of the expected results. ## Actual results Specify the actual results or traceback. /usr/local/lib/python3.7/dist-packages/datasets/utils/file_utils.py in <module>() 25 import posixpath 26 import requests ---> 27 from tqdm.contrib.concurrent import thread_map 28 29 from .. import __version__, config, utils ModuleNotFoundError: No module named 'tqdm.contrib.concurrent' --------------------------------------------------------------------------- NOTE: If your import is failing due to a missing package, you can manually install dependencies using either !pip or !apt. To view examples of installing some common dependencies, click the "Open Examples" button below. --------------------------------------------------------------------------- ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: latest version as of 07/21/2021 - Platform: Google Colab - Python version: 3.7 - PyArrow version:
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2700/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2700/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2699
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2699/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2699/comments
https://api.github.com/repos/huggingface/datasets/issues/2699/events
https://github.com/huggingface/datasets/issues/2699
950,221,226
MDU6SXNzdWU5NTAyMjEyMjY=
2,699
cannot combine splits merging and streaming?
{ "login": "eyaler", "id": 4436747, "node_id": "MDQ6VXNlcjQ0MzY3NDc=", "avatar_url": "https://avatars.githubusercontent.com/u/4436747?v=4", "gravatar_id": "", "url": "https://api.github.com/users/eyaler", "html_url": "https://github.com/eyaler", "followers_url": "https://api.github.com/users/eyaler/followers", "following_url": "https://api.github.com/users/eyaler/following{/other_user}", "gists_url": "https://api.github.com/users/eyaler/gists{/gist_id}", "starred_url": "https://api.github.com/users/eyaler/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/eyaler/subscriptions", "organizations_url": "https://api.github.com/users/eyaler/orgs", "repos_url": "https://api.github.com/users/eyaler/repos", "events_url": "https://api.github.com/users/eyaler/events{/privacy}", "received_events_url": "https://api.github.com/users/eyaler/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Hi ! That's missing indeed. We'll try to implement this for the next version :)\r\n\r\nI guess we just need to implement #2564 first, and then we should be able to add support for splits combinations" ]
1,626,916,405,000
1,626,942,467,000
null
NONE
null
This does not work: `dataset = datasets.load_dataset('mc4','iw',split='train+validation',streaming=True)` with error: `ValueError: Bad split: train+validation. Available splits: ['train', 'validation']` These work: `dataset = datasets.load_dataset('mc4','iw',split='train+validation')` `dataset = datasets.load_dataset('mc4','iw',split='train',streaming=True)` `dataset = datasets.load_dataset('mc4','iw',split='validation',streaming=True)` I could not find a reference to this in the documentation, and the error message is confusing. It would also be nice to allow streaming for the merged splits.
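Until split combinations are supported together with streaming, one possible workaround (a sketch, assuming the two single-split calls above work) is to chain the streamed splits manually:

```python
# Hedged workaround: stream each split separately and iterate over
# their concatenation with itertools.chain, since "train+validation"
# is not yet accepted together with streaming=True.
from itertools import chain

import datasets

train = datasets.load_dataset("mc4", "iw", split="train", streaming=True)
validation = datasets.load_dataset("mc4", "iw", split="validation", streaming=True)

for example in chain(train, validation):
    pass  # process each streamed example here
```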
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2699/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2699/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2698
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2698/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2698/comments
https://api.github.com/repos/huggingface/datasets/issues/2698/events
https://github.com/huggingface/datasets/pull/2698
950,159,867
MDExOlB1bGxSZXF1ZXN0Njk0NzUxMzMw
2,698
Ignore empty batch when writing
{ "login": "pcuenca", "id": 1177582, "node_id": "MDQ6VXNlcjExNzc1ODI=", "avatar_url": "https://avatars.githubusercontent.com/u/1177582?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pcuenca", "html_url": "https://github.com/pcuenca", "followers_url": "https://api.github.com/users/pcuenca/followers", "following_url": "https://api.github.com/users/pcuenca/following{/other_user}", "gists_url": "https://api.github.com/users/pcuenca/gists{/gist_id}", "starred_url": "https://api.github.com/users/pcuenca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pcuenca/subscriptions", "organizations_url": "https://api.github.com/users/pcuenca/orgs", "repos_url": "https://api.github.com/users/pcuenca/repos", "events_url": "https://api.github.com/users/pcuenca/events{/privacy}", "received_events_url": "https://api.github.com/users/pcuenca/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,906,930,000
1,627,311,363,000
1,627,305,926,000
CONTRIBUTOR
null
This prevents a schema update with unknown column types, as reported in #2644. This is my first attempt at fixing the issue. I tested the following: - The first batch returned by a batched map operation is empty. - An intermediate batch is empty. - `python -m unittest tests.test_arrow_writer` passes. However, `arrow_writer` looks like a pretty generic interface, so I'm not sure whether there are other uses I may have overlooked. Let me know if that's the case, or if a better approach would be preferable.
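A minimal illustration of the idea (a hypothetical helper, not the actual `arrow_writer` code):

```python
# Hedged sketch: a batch is a dict of column name -> list of values;
# if every column is empty there is nothing to infer a schema from,
# so writing it would only degrade the schema. Skip it instead.
import pyarrow as pa

def maybe_write_batch(writer, batch):
    if not batch or all(len(values) == 0 for values in batch.values()):
        return  # ignore empty batches entirely
    writer.write_table(pa.Table.from_pydict(batch))
```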
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2698/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2698/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2698", "html_url": "https://github.com/huggingface/datasets/pull/2698", "diff_url": "https://github.com/huggingface/datasets/pull/2698.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2698.patch", "merged_at": 1627305926000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2697
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2697/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2697/comments
https://api.github.com/repos/huggingface/datasets/issues/2697/events
https://github.com/huggingface/datasets/pull/2697
950,021,623
MDExOlB1bGxSZXF1ZXN0Njk0NjMyODg0
2,697
Fix import on Colab
{ "login": "nateraw", "id": 32437151, "node_id": "MDQ6VXNlcjMyNDM3MTUx", "avatar_url": "https://avatars.githubusercontent.com/u/32437151?v=4", "gravatar_id": "", "url": "https://api.github.com/users/nateraw", "html_url": "https://github.com/nateraw", "followers_url": "https://api.github.com/users/nateraw/followers", "following_url": "https://api.github.com/users/nateraw/following{/other_user}", "gists_url": "https://api.github.com/users/nateraw/gists{/gist_id}", "starred_url": "https://api.github.com/users/nateraw/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nateraw/subscriptions", "organizations_url": "https://api.github.com/users/nateraw/orgs", "repos_url": "https://api.github.com/users/nateraw/repos", "events_url": "https://api.github.com/users/nateraw/events{/privacy}", "received_events_url": "https://api.github.com/users/nateraw/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "@lhoestq @albertvillanova - It might be a good idea to have a patch release after this gets merged (presumably tomorrow morning when you're around). The Colab issue linked to this PR is a pretty big blocker. " ]
1,626,894,218,000
1,626,937,748,000
1,626,937,747,000
CONTRIBUTOR
null
Fix #2695, fix #2700.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2697/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2697/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2697", "html_url": "https://github.com/huggingface/datasets/pull/2697", "diff_url": "https://github.com/huggingface/datasets/pull/2697.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2697.patch", "merged_at": 1626937746000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2696
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2696/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2696/comments
https://api.github.com/repos/huggingface/datasets/issues/2696/events
https://github.com/huggingface/datasets/pull/2696
949,901,726
MDExOlB1bGxSZXF1ZXN0Njk0NTMwODg3
2,696
Add support for disable_progress_bar on Windows
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "The CI failure seems unrelated to this PR (probably has something to do with Transformers)." ]
1,626,885,293,000
1,627,306,274,000
1,627,292,317,000
CONTRIBUTOR
null
This PR is a continuation of #2667 and adds support for `utils.disable_progress_bar()` on Windows when using multiprocessing. This [answer](https://stackoverflow.com/a/6596695/14095927) on SO explains nicely why the current approach (calling `utils.is_progress_bar_enabled()` inside `Dataset._map_single`) would not work on Windows.
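A toy reproduction of the platform difference described in the SO answer (not the library's code):

```python
# Hedged sketch: on Windows, multiprocessing uses "spawn", so worker
# processes re-import the module and see module-level defaults, not
# values the parent toggled at runtime. Passing the resolved flag as
# an argument to the worker function works on every platform.
from multiprocessing import Pool

def worker(args):
    idx, disable_progress_bar = args
    return idx, disable_progress_bar  # use the passed-in flag, not a global

if __name__ == "__main__":
    disable_progress_bar = True  # resolved once, in the parent process
    with Pool(2) as pool:
        print(pool.map(worker, [(i, disable_progress_bar) for i in range(4)]))
```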
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2696/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2696/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2696", "html_url": "https://github.com/huggingface/datasets/pull/2696", "diff_url": "https://github.com/huggingface/datasets/pull/2696.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2696.patch", "merged_at": 1627292317000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2695
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2695/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2695/comments
https://api.github.com/repos/huggingface/datasets/issues/2695/events
https://github.com/huggingface/datasets/issues/2695
949,864,823
MDU6SXNzdWU5NDk4NjQ4MjM=
2,695
Cannot import load_dataset on Colab
{ "login": "bayartsogt-ya", "id": 43239645, "node_id": "MDQ6VXNlcjQzMjM5NjQ1", "avatar_url": "https://avatars.githubusercontent.com/u/43239645?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bayartsogt-ya", "html_url": "https://github.com/bayartsogt-ya", "followers_url": "https://api.github.com/users/bayartsogt-ya/followers", "following_url": "https://api.github.com/users/bayartsogt-ya/following{/other_user}", "gists_url": "https://api.github.com/users/bayartsogt-ya/gists{/gist_id}", "starred_url": "https://api.github.com/users/bayartsogt-ya/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bayartsogt-ya/subscriptions", "organizations_url": "https://api.github.com/users/bayartsogt-ya/orgs", "repos_url": "https://api.github.com/users/bayartsogt-ya/repos", "events_url": "https://api.github.com/users/bayartsogt-ya/events{/privacy}", "received_events_url": "https://api.github.com/users/bayartsogt-ya/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "I'm facing the same issue on Colab today too.\r\n\r\n```\r\nModuleNotFoundError Traceback (most recent call last)\r\n<ipython-input-4-5833ac0f5437> in <module>()\r\n 3 \r\n 4 from ray import tune\r\n----> 5 from datasets import DatasetDict, Dataset\r\n 6 from datasets import load_dataset, load_metric\r\n 7 from dataclasses import dataclass\r\n\r\n7 frames\r\n/usr/local/lib/python3.7/dist-packages/datasets/utils/file_utils.py in <module>()\r\n 25 import posixpath\r\n 26 import requests\r\n---> 27 from tqdm.contrib.concurrent import thread_map\r\n 28 \r\n 29 from .. import __version__, config, utils\r\n\r\nModuleNotFoundError: No module named 'tqdm.contrib.concurrent'\r\n\r\n---------------------------------------------------------------------------\r\nNOTE: If your import is failing due to a missing package, you can\r\nmanually install dependencies using either !pip or !apt.\r\n\r\nTo view examples of installing some common dependencies, click the\r\n\"Open Examples\" button below.\r\n---------------------------------------------------------------------------\r\n```", "@phosseini \r\nI think it is related to [1.10.0](https://github.com/huggingface/datasets/actions/runs/1052653701) release done 3 hours ago. (cc: @lhoestq )\r\nFor now I just downgraded to 1.9.0 and it is working fine.", "> @phosseini\r\n> I think it is related to [1.10.0](https://github.com/huggingface/datasets/actions/runs/1052653701) release done 3 hours ago. (cc: @lhoestq )\r\n> For now I just downgraded to 1.9.0 and it is working fine.\r\n\r\nSame here, downgraded to 1.9.0 for now and works fine.", "Hi, \r\n\r\nupdating tqdm to the newest version resolves the issue for me. You can do this as follows in Colab:\r\n```\r\n!pip install tqdm --upgrade\r\n```", "Hi @bayartsogt-ya and @phosseini, thanks for reporting.\r\n\r\nWe are fixing this critical issue and making an urgent patch release of the `datasets` library today.\r\n\r\nIn the meantime, as pointed out by @mariosasko, you can circumvent this issue by updating the `tqdm` library: \r\n```\r\n!pip install -U tqdm\r\n```" ]
1,626,882,771,000
1,626,938,785,000
1,626,937,747,000
NONE
null
## Describe the bug Got tqdm concurrent module not found error during importing load_dataset from datasets. ## Steps to reproduce the bug Here [colab notebook](https://colab.research.google.com/drive/1pErWWnVP4P4mVHjSFUtkePd8Na_Qirg4?usp=sharing) to reproduce the error On colab: ```python !pip install datasets from datasets import load_dataset ``` ## Expected results Works without error ## Actual results Specify the actual results or traceback. ``` ModuleNotFoundError Traceback (most recent call last) <ipython-input-2-8cc7de4c69eb> in <module>() ----> 1 from datasets import load_dataset, load_metric, Metric, MetricInfo, Features, Value 2 from sklearn.metrics import mean_squared_error /usr/local/lib/python3.7/dist-packages/datasets/__init__.py in <module>() 31 ) 32 ---> 33 from .arrow_dataset import Dataset, concatenate_datasets 34 from .arrow_reader import ArrowReader, ReadInstruction 35 from .arrow_writer import ArrowWriter /usr/local/lib/python3.7/dist-packages/datasets/arrow_dataset.py in <module>() 40 from tqdm.auto import tqdm 41 ---> 42 from datasets.tasks.text_classification import TextClassification 43 44 from . import config, utils /usr/local/lib/python3.7/dist-packages/datasets/tasks/__init__.py in <module>() 1 from typing import Optional 2 ----> 3 from ..utils.logging import get_logger 4 from .automatic_speech_recognition import AutomaticSpeechRecognition 5 from .base import TaskTemplate /usr/local/lib/python3.7/dist-packages/datasets/utils/__init__.py in <module>() 19 20 from . import logging ---> 21 from .download_manager import DownloadManager, GenerateMode 22 from .file_utils import DownloadConfig, cached_path, hf_bucket_url, is_remote_url, temp_seed 23 from .mock_download_manager import MockDownloadManager /usr/local/lib/python3.7/dist-packages/datasets/utils/download_manager.py in <module>() 24 25 from .. import config ---> 26 from .file_utils import ( 27 DownloadConfig, 28 cached_path, /usr/local/lib/python3.7/dist-packages/datasets/utils/file_utils.py in <module>() 25 import posixpath 26 import requests ---> 27 from tqdm.contrib.concurrent import thread_map 28 29 from .. import __version__, config, utils ModuleNotFoundError: No module named 'tqdm.contrib.concurrent' ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.10.0 - Platform: Colab - Python version: 3.7.11 - PyArrow version: 3.0.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2695/reactions", "total_count": 3, "+1": 3, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2695/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2694
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2694/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2694/comments
https://api.github.com/repos/huggingface/datasets/issues/2694/events
https://github.com/huggingface/datasets/pull/2694
949,844,722
MDExOlB1bGxSZXF1ZXN0Njk0NDg0NTcy
2,694
fix: 🐛 change string format to allow copy/paste to work in bash
{ "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,881,440,000
1,626,950,507,000
1,626,950,507,000
CONTRIBUTOR
null
Before this change, copy/pasting the command resulted in an error because the square bracket characters `[]` are special characters in bash.
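For example (hedged; the extras name is illustrative):

```bash
# Unquoted square brackets can be expanded as a glob pattern by the
# shell (zsh errors out; bash can too if a matching file exists):
pip install datasets[streaming]    # fragile when pasted into a shell
pip install 'datasets[streaming]'  # quoted: copy/paste-safe everywhere
```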
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2694/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2694/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2694", "html_url": "https://github.com/huggingface/datasets/pull/2694", "diff_url": "https://github.com/huggingface/datasets/pull/2694.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2694.patch", "merged_at": 1626950507000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2693
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2693/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2693/comments
https://api.github.com/repos/huggingface/datasets/issues/2693/events
https://github.com/huggingface/datasets/pull/2693
949,797,014
MDExOlB1bGxSZXF1ZXN0Njk0NDQ1ODAz
2,693
Fix OSCAR Esperanto
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,878,630,000
1,626,879,232,000
1,626,879,231,000
MEMBER
null
The Esperanto part (original) of OSCAR has the wrong number of examples: ```python from datasets import load_dataset raw_datasets = load_dataset("oscar", "unshuffled_original_eo") ``` raises ```python NonMatchingSplitsSizesError: [{'expected': SplitInfo(name='train', num_bytes=314188336, num_examples=121171, dataset_name='oscar'), 'recorded': SplitInfo(name='train', num_bytes=314064514, num_examples=121168, dataset_name='oscar')}] ``` I updated the number of expected examples in dataset_infos.json. cc @sgugger
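Until the recorded sizes are updated, a possible workaround sketch (using the `ignore_verifications` flag available in `datasets` 1.x) is to skip the verification step:

```python
# Hedged workaround for NonMatchingSplitsSizesError while the recorded
# split sizes in dataset_infos.json are stale: skip the size checks.
from datasets import load_dataset

raw_datasets = load_dataset(
    "oscar",
    "unshuffled_original_eo",
    ignore_verifications=True,  # don't compare against recorded split infos
)
```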
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2693/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 1, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2693/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2693", "html_url": "https://github.com/huggingface/datasets/pull/2693", "diff_url": "https://github.com/huggingface/datasets/pull/2693.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2693.patch", "merged_at": 1626879231000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2692
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2692/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2692/comments
https://api.github.com/repos/huggingface/datasets/issues/2692/events
https://github.com/huggingface/datasets/pull/2692
949,765,484
MDExOlB1bGxSZXF1ZXN0Njk0NDE4MDg1
2,692
Update BibTeX entry
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,877,415,000
1,626,881,501,000
1,626,881,500,000
MEMBER
null
Update BibTeX entry
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2692/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2692/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2692", "html_url": "https://github.com/huggingface/datasets/pull/2692", "diff_url": "https://github.com/huggingface/datasets/pull/2692.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2692.patch", "merged_at": 1626881500000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2691
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2691/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2691/comments
https://api.github.com/repos/huggingface/datasets/issues/2691/events
https://github.com/huggingface/datasets/issues/2691
949,758,379
MDU6SXNzdWU5NDk3NTgzNzk=
2,691
xtreme / pan-x cannot be downloaded
{ "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Hi @severo, thanks for reporting.\r\n\r\nHowever I have not been able to reproduce this issue. Could you please confirm if the problem persists for you?\r\n\r\nMaybe Dropbox (where the data source is hosted) was temporarily unavailable when you tried.", "Hmmm, the file (https://www.dropbox.com/s/dl/12h3qqog6q4bjve/panx_dataset.tar) really seems to be unavailable... I tried from various connexions and machines and got the same 404 error. Maybe the dataset has been loaded from the cache in your case?", "Yes @severo, weird... I could access the file when I answered to you, but now I cannot longer access it either... Maybe it was from the cache as you point out.\r\n\r\nAnyway, I have opened an issue in the GitHub repository responsible for the original dataset: https://github.com/afshinrahimi/mmner/issues/4\r\nI have also contacted the maintainer by email.\r\n\r\nI'll keep you informed with their answer.", "Reply from the author/maintainer: \r\n> Will fix the issue and let you know during the weekend.", "The author told that apparently Dropbox has changed their policy and no longer allow downloading the file without having signed in first. The author asked Hugging Face to host their dataset." ]
1,626,877,085,000
1,627,292,062,000
1,627,292,062,000
CONTRIBUTOR
null
## Describe the bug Dataset xtreme / pan-x cannot be loaded. This seems related to https://github.com/huggingface/datasets/pull/2326 ## Steps to reproduce the bug ```python dataset = load_dataset("xtreme", "PAN-X.fr") ``` ## Expected results Load the dataset ## Actual results ``` FileNotFoundError: Couldn't find file at https://www.dropbox.com/s/12h3qqog6q4bjve/panx_dataset.tar?dl=1 ``` ## Environment info - `datasets` version: 1.9.0 - Platform: macOS-11.4-x86_64-i386-64bit - Python version: 3.8.11 - PyArrow version: 4.0.1
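Until the hosting issue is resolved, a possible workaround is the manual-download route the docs describe for this dataset. This is only a sketch: the local path is a placeholder, and it assumes the PAN-X loader still accepts a `data_dir` pointing at a folder containing the manually obtained archive.

```python
from datasets import load_dataset

# Hypothetical workaround sketch: obtain panx_dataset.tar by other means
# (e.g. a signed-in Dropbox download), put it in a local folder, and point
# the loader at that folder. The path below is a placeholder.
dataset = load_dataset("xtreme", "PAN-X.fr", data_dir="path/to/your/downloaded/data")
```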
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2691/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2691/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2690
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2690/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2690/comments
https://api.github.com/repos/huggingface/datasets/issues/2690/events
https://github.com/huggingface/datasets/pull/2690
949,574,500
MDExOlB1bGxSZXF1ZXN0Njk0MjU5MDc1
2,690
Docs details
{ "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Thanks for all the comments and for the corrections in the docs !\r\n\r\nAbout all the points you mentioned:\r\n\r\n> * the code samples assume the expected libraries have already been installed. Maybe add a section at start, or add it to every code sample. Something like `pip install datasets transformers torch 'datasets[streaming]'` (maybe just link to https://huggingface.co/docs/datasets/installation.html + a one-liner that installs all the requirements / alternatively a requirements.txt file)\r\n\r\nYes good idea\r\n\r\n> * \"If you’d like to play with the examples, you must install it from source.\" in https://huggingface.co/docs/datasets/installation.html: it's not clear to me what this means (what are these \"examples\"?)\r\n\r\nIt refers to examples scripts inside the git repository of the library, see the `examples` folder in the `transformers` repo.\r\nWe don't have examples yet in the git repo of `datasets` as in transformers. So currently there are no examples. Maybe we can just remove this sentence from the docs for now\r\n\r\n> * in https://huggingface.co/docs/datasets/loading_datasets.html: \"or AWS bucket if it’s not already stored in the library\". It's the only place in the doc (aside from the docstring https://huggingface.co/docs/datasets/package_reference/loading_methods.html?highlight=aws bucket#datasets.list_datasets) where the \"AWS bucket\" is mentioned. It's not easy to understand what this means. Maybe explain more, and link to https://s3.amazonaws.com/datasets.huggingface.co and/or https://huggingface.co/docs/datasets/filesystems.html.\r\n\r\nThis is outdated and must be replaced by\r\n```\r\nor from the Hugging Face Hub if it’s not already stored in the library\r\n```\r\n\r\n> * example in https://huggingface.co/docs/datasets/loading_datasets.html#manually-downloading-files is obsoleted by [Enable auto-download for PAN-X / Wikiann domain in XTREME #2326](https://github.com/huggingface/datasets/pull/2326). Also: see [xtreme / pan-x cannot be downloaded #2691](https://github.com/huggingface/datasets/issues/2691) for a bug on this specific dataset.\r\n\r\nWe can replace the `XTREME` `PANX` dataste by `matinf` instead for example\r\n\r\n> * in https://huggingface.co/docs/datasets/loading_datasets.html#manually-downloading-files the doc says \"After you’ve downloaded the files, you can point to the folder hosting them locally with the data_dir argument as follows:\", but the following example does not show how to use `data_dir`\r\n\r\nLet's add `data_dir=\"path/to/your/downloaded/data\"` for example\r\n\r\n> * in https://huggingface.co/docs/datasets/loading_datasets.html#csv-files, it would be nice to have an URL to the csv loader reference (but I'm not sure there is one in the API reference). This comment applies in many places in the doc: I would want the API reference to contain doc for all the code/functions/classes... and I would want a lot more links inside the doc pointing to the API entries.\r\n\r\nCurrently there's no documentation for the CSV loader config. Maybe we can add the docstrings to the `CsvConfig` class to explain the parameters and how it works, and then redirect to the doc of this class in this section of the documentation.\r\n\r\n> * in the API reference (docstrings) I would prefer \"SOURCE\" to link to github instead of a copy of the code inside the docs site (eg. 
https://github.com/huggingface/datasets/blob/master/src/datasets/load.py#L711 instead of https://huggingface.co/docs/datasets/_modules/datasets/load.html#load_dataset)\r\n\r\nThis is the same as in `transformers`, not sure if this is a big issue\r\n\r\n> * it seems like not all the API is exposed in the doc. For example, there is no doc for [`disable_progress_bar`](https://github.com/huggingface/datasets/search?q=disable_progress_bar), see https://huggingface.co/docs/datasets/search.html?q=disable_progress_bar, even if the code contains docstrings. Does it mean that the function is not officially supported? (otherwise, maybe it also deserves a mention in https://huggingface.co/docs/datasets/package_reference/logging_methods.html)\r\n\r\nThe function `disable_progress_bar` should definitely be in the docs, thanks. We can add it to the logging methods\r\n\r\n> * in https://huggingface.co/docs/datasets/loading_datasets.html?highlight=most%20efficient%20format%20have%20json%20files%20consisting%20multiple%20json%20objects#json-files, \"The most efficient format is to have JSON files consisting of multiple JSON objects, one per line, representing individual data rows:\", maybe link to https://en.wikipedia.org/wiki/JSON_streaming#Line-delimited_JSON and give it a name (\"line-delimited JSON\"? \"JSON Lines\" as in https://huggingface.co/docs/datasets/processing.html#exporting-a-dataset-to-csv-json-parquet-or-to-python-objects ?)\r\n\r\nYes good idea !\r\n\r\n> * in https://huggingface.co/docs/datasets/loading_datasets.html, for the local files sections, it would be nice to provide sample csv / json / text files to download, so that it's easier for the reader to try to load them (instead: they won't try)\r\n\r\nSure why not. Moreover the csv loader now supports remote files so you could just run the code pass an an URL to the sample csv file.\r\n\r\n> * the doc explains how to shard a dataset, but does not explain why and when a dataset should be sharded (I have no idea... for [parallelizing](https://huggingface.co/docs/datasets/processing.html#multiprocessing)?). It does neither give an idea of the number of shards a dataset typically should have and why.\r\n\r\nThis can be used for distributed processing or just to use a percentage of the data. We can definitely give example of use cases\r\n\r\n> * the code example in https://huggingface.co/docs/datasets/processing.html#mapping-in-a-distributed-setting does not work, because `training_args` has not been defined before in the doc.\r\n\r\n`training_args` comes from `transformers`, it's a practical way to define all your arguments to train a model. Maybe we can just import it from `transformers` and use it with the default values\r\n\r\n" ]
1,626,864,194,000
1,627,411,254,000
1,627,411,254,000
CONTRIBUTOR
null
Some comments here: - the code samples assume the expected libraries have already been installed. Maybe add a section at start, or add it to every code sample. Something like `pip install datasets transformers torch 'datasets[streaming]'` (maybe just link to https://huggingface.co/docs/datasets/installation.html + a one-liner that installs all the requirements / alternatively a requirements.txt file) - "If you’d like to play with the examples, you must install it from source." in https://huggingface.co/docs/datasets/installation.html: it's not clear to me what this means (what are these "examples"?) - in https://huggingface.co/docs/datasets/loading_datasets.html: "or AWS bucket if it’s not already stored in the library". It's the only place in the doc (aside from the docstring https://huggingface.co/docs/datasets/package_reference/loading_methods.html?highlight=aws bucket#datasets.list_datasets) where the "AWS bucket" is mentioned. It's not easy to understand what this means. Maybe explain more, and link to https://s3.amazonaws.com/datasets.huggingface.co and/or https://huggingface.co/docs/datasets/filesystems.html. - example in https://huggingface.co/docs/datasets/loading_datasets.html#manually-downloading-files is obsoleted by https://github.com/huggingface/datasets/pull/2326. Also: see https://github.com/huggingface/datasets/issues/2691 for a bug on this specific dataset. - in https://huggingface.co/docs/datasets/loading_datasets.html#manually-downloading-files the doc says "After you’ve downloaded the files, you can point to the folder hosting them locally with the data_dir argument as follows:", but the following example does not show how to use `data_dir` - in https://huggingface.co/docs/datasets/loading_datasets.html#csv-files, it would be nice to have a URL to the csv loader reference (but I'm not sure there is one in the API reference). This comment applies in many places in the doc: I would want the API reference to contain doc for all the code/functions/classes... and I would want a lot more links inside the doc pointing to the API entries. - in the API reference (docstrings) I would prefer "SOURCE" to link to github instead of a copy of the code inside the docs site (eg. https://github.com/huggingface/datasets/blob/master/src/datasets/load.py#L711 instead of https://huggingface.co/docs/datasets/_modules/datasets/load.html#load_dataset) - it seems like not all the API is exposed in the doc. For example, there is no doc for [`disable_progress_bar`](https://github.com/huggingface/datasets/search?q=disable_progress_bar), see https://huggingface.co/docs/datasets/search.html?q=disable_progress_bar, even if the code contains docstrings. Does it mean that the function is not officially supported? (otherwise, maybe it also deserves a mention in https://huggingface.co/docs/datasets/package_reference/logging_methods.html) - in https://huggingface.co/docs/datasets/loading_datasets.html?highlight=most%20efficient%20format%20have%20json%20files%20consisting%20multiple%20json%20objects#json-files, "The most efficient format is to have JSON files consisting of multiple JSON objects, one per line, representing individual data rows:", maybe link to https://en.wikipedia.org/wiki/JSON_streaming#Line-delimited_JSON and give it a name ("line-delimited JSON"? "JSON Lines" as in https://huggingface.co/docs/datasets/processing.html#exporting-a-dataset-to-csv-json-parquet-or-to-python-objects ?)
- in https://huggingface.co/docs/datasets/loading_datasets.html, for the local files sections, it would be nice to provide sample csv / json / text files to download, so that it's easier for the reader to try to load them (otherwise, they won't try) - the doc explains how to shard a dataset, but does not explain why and when a dataset should be sharded (I have no idea... for [parallelizing](https://huggingface.co/docs/datasets/processing.html#multiprocessing)?). Nor does it give an idea of the number of shards a dataset typically should have, and why. - the code example in https://huggingface.co/docs/datasets/processing.html#mapping-in-a-distributed-setting does not work, because `training_args` has not been defined before in the doc.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2690/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2690/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2690", "html_url": "https://github.com/huggingface/datasets/pull/2690", "diff_url": "https://github.com/huggingface/datasets/pull/2690.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2690.patch", "merged_at": 1627411253000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2689
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2689/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2689/comments
https://api.github.com/repos/huggingface/datasets/issues/2689/events
https://github.com/huggingface/datasets/issues/2689
949,447,104
MDU6SXNzdWU5NDk0NDcxMDQ=
2,689
cannot save the dataset to disk after rename_column
{ "login": "PaulLerner", "id": 25532159, "node_id": "MDQ6VXNlcjI1NTMyMTU5", "avatar_url": "https://avatars.githubusercontent.com/u/25532159?v=4", "gravatar_id": "", "url": "https://api.github.com/users/PaulLerner", "html_url": "https://github.com/PaulLerner", "followers_url": "https://api.github.com/users/PaulLerner/followers", "following_url": "https://api.github.com/users/PaulLerner/following{/other_user}", "gists_url": "https://api.github.com/users/PaulLerner/gists{/gist_id}", "starred_url": "https://api.github.com/users/PaulLerner/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/PaulLerner/subscriptions", "organizations_url": "https://api.github.com/users/PaulLerner/orgs", "repos_url": "https://api.github.com/users/PaulLerner/repos", "events_url": "https://api.github.com/users/PaulLerner/events{/privacy}", "received_events_url": "https://api.github.com/users/PaulLerner/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Hi ! That's because you are trying to overwrite a file that is already open and being used.\r\nIndeed `foo/dataset.arrow` is open and used by your `dataset` object.\r\n\r\nWhen you do `rename_column`, the resulting dataset reads the data from the same arrow file.\r\nIn other cases like when using `map` on the other hand, the resulting dataset reads the data from another arrow file that is the result of the map transform.\r\n\r\nTherefore overwriting a dataset after `rename_column` is not possible, but it is possible after `map`, since `rename_column` doesn't switch to using another arrow file (the actual data stay the same).", "Ok, thanks for clearing it up :)" ]
1,626,855,220,000
1,626,873,064,000
1,626,873,064,000
CONTRIBUTOR
null
## Describe the bug If you use `rename_column` and make no other modification, you will be unable to save the dataset using `save_to_disk`. ## Steps to reproduce the bug ```python # Sample code to reproduce the bug In [1]: from datasets import Dataset, load_from_disk In [5]: dataset=Dataset.from_dict({'foo': [0]}) In [7]: dataset.save_to_disk('foo') In [8]: dataset=load_from_disk('foo') In [10]: dataset=dataset.rename_column('foo', 'bar') In [11]: dataset.save_to_disk('foo') --------------------------------------------------------------------------- PermissionError Traceback (most recent call last) <ipython-input-11-a3bc0d4fc339> in <module> ----> 1 dataset.save_to_disk('foo') /mnt/beegfs/projects/meerqat/anaconda3/envs/meerqat/lib/python3.7/site-packages/datasets/arrow_dataset.py in save_to_disk(self, dataset_path, fs) 597 if Path(dataset_path, config.DATASET_ARROW_FILENAME) in cache_files_paths: 598 raise PermissionError( --> 599 f"Tried to overwrite {Path(dataset_path, config.DATASET_ARROW_FILENAME)} but a dataset can't overwrite itself." 600 ) 601 if Path(dataset_path, config.DATASET_INDICES_FILENAME) in cache_files_paths: PermissionError: Tried to overwrite foo/dataset.arrow but a dataset can't overwrite itself. ``` N. B. I created the dataset from a dict to enable easy reproduction, but the same happens if you load an existing dataset (e.g. starting from `In [8]`) ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.8.0 - Platform: Linux-3.10.0-1160.11.1.el7.x86_64-x86_64-with-centos-7.9.2009-Core - Python version: 3.7.10 - PyArrow version: 3.0.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2689/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2689/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2688
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2688/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2688/comments
https://api.github.com/repos/huggingface/datasets/issues/2688/events
https://github.com/huggingface/datasets/issues/2688
949,182,074
MDU6SXNzdWU5NDkxODIwNzQ=
2,688
Hebrew language codes he and iw should be treated as aliases
{ "login": "eyaler", "id": 4436747, "node_id": "MDQ6VXNlcjQ0MzY3NDc=", "avatar_url": "https://avatars.githubusercontent.com/u/4436747?v=4", "gravatar_id": "", "url": "https://api.github.com/users/eyaler", "html_url": "https://github.com/eyaler", "followers_url": "https://api.github.com/users/eyaler/followers", "following_url": "https://api.github.com/users/eyaler/following{/other_user}", "gists_url": "https://api.github.com/users/eyaler/gists{/gist_id}", "starred_url": "https://api.github.com/users/eyaler/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/eyaler/subscriptions", "organizations_url": "https://api.github.com/users/eyaler/orgs", "repos_url": "https://api.github.com/users/eyaler/repos", "events_url": "https://api.github.com/users/eyaler/events{/privacy}", "received_events_url": "https://api.github.com/users/eyaler/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Hi @eyaler, thanks for reporting.\r\n\r\nWhile you are true with respect the Hebrew language tag (\"iw\" is deprecated and \"he\" is the preferred value), in the \"mc4\" dataset (which is a derived dataset) we have kept the language tags present in the original dataset: [Google C4](https://www.tensorflow.org/datasets/catalog/c4).", "For discoverability on the website I updated the YAML tags at the top of the mC4 dataset card https://github.com/huggingface/datasets/commit/38288087b1b02f97586e0346e8f28f4960f1fd37\r\n\r\nOnce the website is updated, mC4 will be listed in https://huggingface.co/datasets?filter=languages:he\r\n\r\n" ]
1,626,822,832,000
1,626,885,293,000
1,626,885,293,000
NONE
null
https://huggingface.co/datasets/mc4 is not listed when searching for Hebrew datasets (he), as it uses the older language code iw, preventing discoverability.
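For context, `iw` is the deprecated ISO 639-1 code for Hebrew and `he` is the preferred one. A normalization step along these lines (purely illustrative, not part of the `datasets` API) would make the two codes interchangeable when filtering by language:

```python
# Illustrative sketch: map deprecated ISO 639-1 codes to their preferred
# values before comparing or filtering language tags.
DEPRECATED_CODES = {"iw": "he", "in": "id", "ji": "yi"}  # Hebrew, Indonesian, Yiddish

def normalize_language_code(code: str) -> str:
    return DEPRECATED_CODES.get(code, code)

assert normalize_language_code("iw") == normalize_language_code("he") == "he"
```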
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2688/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2688/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2687
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2687/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2687/comments
https://api.github.com/repos/huggingface/datasets/issues/2687/events
https://github.com/huggingface/datasets/pull/2687
948,890,481
MDExOlB1bGxSZXF1ZXN0NjkzNjY1NDI2
2,687
Minor documentation fix
{ "login": "slowwavesleep", "id": 44175589, "node_id": "MDQ6VXNlcjQ0MTc1NTg5", "avatar_url": "https://avatars.githubusercontent.com/u/44175589?v=4", "gravatar_id": "", "url": "https://api.github.com/users/slowwavesleep", "html_url": "https://github.com/slowwavesleep", "followers_url": "https://api.github.com/users/slowwavesleep/followers", "following_url": "https://api.github.com/users/slowwavesleep/following{/other_user}", "gists_url": "https://api.github.com/users/slowwavesleep/gists{/gist_id}", "starred_url": "https://api.github.com/users/slowwavesleep/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/slowwavesleep/subscriptions", "organizations_url": "https://api.github.com/users/slowwavesleep/orgs", "repos_url": "https://api.github.com/users/slowwavesleep/repos", "events_url": "https://api.github.com/users/slowwavesleep/events{/privacy}", "received_events_url": "https://api.github.com/users/slowwavesleep/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,803,003,000
1,626,872,695,000
1,626,872,695,000
CONTRIBUTOR
null
Currently, the [Writing a dataset loading script](https://huggingface.co/docs/datasets/add_dataset.html) page has a small error. The link to the `matinf` dataset in the [_Dataset scripts of reference_](https://huggingface.co/docs/datasets/add_dataset.html#dataset-scripts-of-reference) section actually leads to `xsquad` instead. This PR fixes that.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2687/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2687/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2687", "html_url": "https://github.com/huggingface/datasets/pull/2687", "diff_url": "https://github.com/huggingface/datasets/pull/2687.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2687.patch", "merged_at": 1626872695000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2686
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2686/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2686/comments
https://api.github.com/repos/huggingface/datasets/issues/2686/events
https://github.com/huggingface/datasets/pull/2686
948,811,669
MDExOlB1bGxSZXF1ZXN0NjkzNTk4OTE3
2,686
Fix bad config ids that name cache directories
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,796,845,000
1,626,798,435,000
1,626,798,435,000
MEMBER
null
`data_dir=None` was considered a dataset config parameter, hence creating a special config_id for every dataset being loaded. Since the config_id is used to name the cache directories, this led to datasets being regenerated for users. I fixed this by ignoring the value of `data_dir` when it is `None` while computing the config_id. I also added a test to make sure the cache directories are not unexpectedly renamed in the future. Fix https://github.com/huggingface/datasets/issues/2683
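The fix can be pictured with a simplified sketch like the one below (illustrative only; the actual implementation in `builder.py` differs):

```python
from hashlib import sha256

def config_id(config_name: str, custom_kwargs: dict) -> str:
    # Drop kwargs whose value is None (e.g. data_dir=None) so that they do
    # not add a hash suffix, and hence do not change the cache directory.
    custom_kwargs = {k: v for k, v in custom_kwargs.items() if v is not None}
    if not custom_kwargs:
        return config_name
    suffix = sha256(str(sorted(custom_kwargs.items())).encode()).hexdigest()[:16]
    return f"{config_name}-{suffix}"

assert config_id("en", {"data_dir": None}) == "en"  # no spurious hash suffix
```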
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2686/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2686/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2686", "html_url": "https://github.com/huggingface/datasets/pull/2686", "diff_url": "https://github.com/huggingface/datasets/pull/2686.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2686.patch", "merged_at": 1626798434000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2685
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2685/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2685/comments
https://api.github.com/repos/huggingface/datasets/issues/2685/events
https://github.com/huggingface/datasets/pull/2685
948,791,572
MDExOlB1bGxSZXF1ZXN0NjkzNTgxNTk2
2,685
Fix Blog Authorship Corpus dataset
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Normally, I'm expecting errors from the validation of the README file... 😅 ", "That is:\r\n```\r\n=========================== short test summary info ============================\r\nFAILED tests/test_dataset_cards.py::test_changed_dataset_card[blog_authorship_corpus]\r\n==== 1 failed, 3182 passed, 2763 skipped, 16 warnings in 201.23s (0:03:21) =====\r\n```", "@lhoestq, apart from the dataset card, everything is OK with this PR: I tested it locally." ]
1,626,795,890,000
1,626,873,118,000
1,626,873,118,000
MEMBER
null
This PR: - Updates the JSON metadata file, which was previously raising a `NonMatchingSplitsSizesError` - Fixes the codec of the data files (`latin_1` instead of `utf-8`), which was previously raising a `UnicodeDecodeError` for some files. Close #2679.
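The codec part of the fix can be illustrated as follows (a sketch, not the actual loader code). `latin_1` maps every byte to a code point, so it cannot raise a decoding error on these files:

```python
# Sketch: decode the blog XML files as latin_1 instead of utf-8, since some
# of them contain bytes that are not valid UTF-8 (see the error in #2679).
with open("788358.male.24.Arts.Libra.xml", encoding="latin_1") as f:
    text = f.read()
```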
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2685/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2685/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2685", "html_url": "https://github.com/huggingface/datasets/pull/2685", "diff_url": "https://github.com/huggingface/datasets/pull/2685.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2685.patch", "merged_at": 1626873117000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2684
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2684/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2684/comments
https://api.github.com/repos/huggingface/datasets/issues/2684/events
https://github.com/huggingface/datasets/pull/2684
948,771,753
MDExOlB1bGxSZXF1ZXN0NjkzNTY0MDY4
2,684
Print absolute local paths in load_dataset error messages
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,794,908,000
1,626,986,899,000
1,626,962,470,000
CONTRIBUTOR
null
Use absolute local paths in the error messages of `load_dataset` as per @stas00's suggestion in https://github.com/huggingface/datasets/pull/2500#issuecomment-874891223
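A minimal sketch of the change this PR describes (the message wording is illustrative; the real error messages live in `load.py`):

```python
import os

# Sketch: resolve a relative path to an absolute one before embedding it in
# the error message, so users see exactly where the lookup happened.
path = "my_dataset/my_script.py"
if not os.path.isfile(path):
    raise FileNotFoundError(f"Couldn't find a dataset script at {os.path.abspath(path)}")
```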
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2684/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2684/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2684", "html_url": "https://github.com/huggingface/datasets/pull/2684", "diff_url": "https://github.com/huggingface/datasets/pull/2684.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2684.patch", "merged_at": 1626962470000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2683
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2683/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2683/comments
https://api.github.com/repos/huggingface/datasets/issues/2683/events
https://github.com/huggingface/datasets/issues/2683
948,721,379
MDU6SXNzdWU5NDg3MjEzNzk=
2,683
Cache directories changed due to recent changes in how config kwargs are handled
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false } ]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,791,877,000
1,626,798,435,000
1,626,798,435,000
MEMBER
null
Since #2659 I can see weird cache directory names with hashes in the config id, even though no additional config kwargs are passed. For example: ```python from datasets import load_dataset_builder c4_builder = load_dataset_builder("c4", "en") print(c4_builder.cache_dir) # /Users/quentinlhoest/.cache/huggingface/datasets/c4/en-174d3b7155eb68db/0.0.0/... # instead of # /Users/quentinlhoest/.cache/huggingface/datasets/c4/en/0.0.0/... ``` This issue is annoying since it silently ignores users' old cache directories and regenerates the datasets. cc @stas00, this is what you experienced a few days ago.
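A regression check in the spirit of the test added with the fix could look like this (a sketch; the actual test in the repository may differ):

```python
import os
from datasets import load_dataset_builder

# With no extra config kwargs, the cache directory should use the plain
# config name "en", not a hashed name such as "en-174d3b7155eb68db".
builder = load_dataset_builder("c4", "en")
assert "/c4/en/" in builder.cache_dir.replace(os.sep, "/")
```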
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2683/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2683/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2682
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2682/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2682/comments
https://api.github.com/repos/huggingface/datasets/issues/2682/events
https://github.com/huggingface/datasets/pull/2682
948,713,137
MDExOlB1bGxSZXF1ZXN0NjkzNTE2NjU2
2,682
Fix c4 expected files
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,791,371,000
1,626,791,891,000
1,626,791,890,000
MEMBER
null
Some files were not registered in the list of expected files to download. Fix https://github.com/huggingface/datasets/issues/2677
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2682/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2682/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2682", "html_url": "https://github.com/huggingface/datasets/pull/2682", "diff_url": "https://github.com/huggingface/datasets/pull/2682.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2682.patch", "merged_at": 1626791890000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2681
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2681/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2681/comments
https://api.github.com/repos/huggingface/datasets/issues/2681/events
https://github.com/huggingface/datasets/issues/2681
948,708,645
MDU6SXNzdWU5NDg3MDg2NDU=
2,681
5 duplicate datasets
{ "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Yes this was documented in the PR that added this hf->paperswithcode mapping (https://github.com/huggingface/datasets/pull/2404) and AFAICT those are slightly distinct datasets so I think it's a wontfix\r\n\r\nFor context on the paperswithcode mapping you can also refer to https://github.com/huggingface/huggingface_hub/pull/43 which contains a lot of background discussion ", "Thanks for the antecedents. I close." ]
1,626,791,100,000
1,626,795,857,000
1,626,795,857,000
CONTRIBUTOR
null
## Describe the bug In 5 cases, I could find a dataset on Paperswithcode which references two Hugging Face datasets as dataset loaders. They are: - https://paperswithcode.com/dataset/multinli -> https://huggingface.co/datasets/multi_nli and https://huggingface.co/datasets/multi_nli_mismatch <img width="838" alt="Capture d’écran 2021-07-20 à 16 33 58" src="https://user-images.githubusercontent.com/1676121/126342757-4625522a-f788-41a3-bd1f-2a8b9817bbf5.png"> - https://paperswithcode.com/dataset/squad -> https://huggingface.co/datasets/squad and https://huggingface.co/datasets/squad_v2 - https://paperswithcode.com/dataset/narrativeqa -> https://huggingface.co/datasets/narrativeqa and https://huggingface.co/datasets/narrativeqa_manual - https://paperswithcode.com/dataset/hate-speech-and-offensive-language -> https://huggingface.co/datasets/hate_offensive and https://huggingface.co/datasets/hate_speech_offensive - https://paperswithcode.com/dataset/newsph-nli -> https://huggingface.co/datasets/newsph and https://huggingface.co/datasets/newsph_nli Possible solutions: - don't fix (it works) - for each pair of duplicate datasets, remove one, and create an alias to the other. ## Steps to reproduce the bug Visit the Paperswithcode links, and look at the "Dataset Loaders" section ## Expected results There should only be one reference to a Hugging Face dataset loader ## Actual results Two Hugging Face dataset loaders
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2681/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2681/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2680
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2680/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2680/comments
https://api.github.com/repos/huggingface/datasets/issues/2680/events
https://github.com/huggingface/datasets/pull/2680
948,649,716
MDExOlB1bGxSZXF1ZXN0NjkzNDYyNzY3
2,680
feat: 🎸 add paperswithcode id for qasper dataset
{ "login": "severo", "id": 1676121, "node_id": "MDQ6VXNlcjE2NzYxMjE=", "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "gravatar_id": "", "url": "https://api.github.com/users/severo", "html_url": "https://github.com/severo", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "organizations_url": "https://api.github.com/users/severo/orgs", "repos_url": "https://api.github.com/users/severo/repos", "events_url": "https://api.github.com/users/severo/events{/privacy}", "received_events_url": "https://api.github.com/users/severo/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,787,349,000
1,626,789,850,000
1,626,789,850,000
CONTRIBUTOR
null
The reverse reference exists on paperswithcode: https://paperswithcode.com/dataset/qasper
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2680/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2680/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2680", "html_url": "https://github.com/huggingface/datasets/pull/2680", "diff_url": "https://github.com/huggingface/datasets/pull/2680.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2680.patch", "merged_at": 1626789850000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2679
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2679/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2679/comments
https://api.github.com/repos/huggingface/datasets/issues/2679/events
https://github.com/huggingface/datasets/issues/2679
948,506,638
MDU6SXNzdWU5NDg1MDY2Mzg=
2,679
Cannot load the blog_authorship_corpus due to codec errors
{ "login": "izaskr", "id": 38069449, "node_id": "MDQ6VXNlcjM4MDY5NDQ5", "avatar_url": "https://avatars.githubusercontent.com/u/38069449?v=4", "gravatar_id": "", "url": "https://api.github.com/users/izaskr", "html_url": "https://github.com/izaskr", "followers_url": "https://api.github.com/users/izaskr/followers", "following_url": "https://api.github.com/users/izaskr/following{/other_user}", "gists_url": "https://api.github.com/users/izaskr/gists{/gist_id}", "starred_url": "https://api.github.com/users/izaskr/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/izaskr/subscriptions", "organizations_url": "https://api.github.com/users/izaskr/orgs", "repos_url": "https://api.github.com/users/izaskr/repos", "events_url": "https://api.github.com/users/izaskr/events{/privacy}", "received_events_url": "https://api.github.com/users/izaskr/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Hi @izaskr, thanks for reporting.\r\n\r\nHowever the traceback you joined does not correspond to the codec error message: it is about other error `NonMatchingSplitsSizesError`. Maybe you missed some important part of your traceback...\r\n\r\nI'm going to have a look at the dataset anyway...", "Hi @izaskr, thanks again for having reported this issue.\r\n\r\nAfter investigation, I have created a Pull Request (#2685) to fix several issues with this dataset:\r\n- the `NonMatchingSplitsSizesError`\r\n- the `UnicodeDecodeError`\r\n\r\nOnce the Pull Request merged into master, you will be able to load this dataset if you install `datasets` from our GitHub repository master branch. Otherwise, you will be able to use it after our next release, by updating `datasets`: `pip install -U datasets`.", "@albertvillanova \r\nCan you shed light on how this fix works?\r\n\r\nWe're experiencing a similar issue. \r\n\r\nIf we run several runs (eg in a Wandb sweep) the first run \"works\" but then we get `NonMatchingSplitsSizesError`\r\n\r\n| run num | actual train examples # | expected example # | recorded example # |\r\n| ------- | -------------- | ----------------- | -------- |\r\n| 1 | 100 | 100 | 100 |\r\n| 2 | 102 | 100 | 102 |\r\n| 3 | 100 | 100 | 202 | \r\n| 4 | 40 | 100 | 40 |\r\n| 5 | 40 | 100 | 40 |\r\n| 6 | 40 | 100 | 40 | \r\n\r\n\r\nThe second through the nth all crash with \r\n\r\n```\r\ndatasets.utils.info_utils.NonMatchingSplitsSizesError: [{'expected': SplitInfo(name='train', num_bytes=19980970, num_examples=100, dataset_name='cies'), 'recorded': SplitInfo(name='train', num_bytes=40163811, num_examples=202, dataset_name='cies')}]\r\n\r\n```" ]
1,626,776,000,000
1,626,886,941,000
1,626,873,118,000
NONE
null
## Describe the bug A codec error is raised while loading the blog_authorship_corpus. ## Steps to reproduce the bug ``` from datasets import load_dataset raw_datasets = load_dataset("blog_authorship_corpus") ``` ## Expected results Loading the dataset without errors. ## Actual results An error similar to the one below was raised for (what seems like) every XML file. /home/izaskr/.cache/huggingface/datasets/downloads/extracted/7cf52524f6517e168604b41c6719292e8f97abbe8f731e638b13423f4212359a/blogs/788358.male.24.Arts.Libra.xml cannot be loaded. Error message: 'utf-8' codec can't decode byte 0xe7 in position 7551: invalid continuation byte Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/izaskr/anaconda3/envs/local_vae_older/lib/python3.8/site-packages/datasets/load.py", line 856, in load_dataset builder_instance.download_and_prepare( File "/home/izaskr/anaconda3/envs/local_vae_older/lib/python3.8/site-packages/datasets/builder.py", line 583, in download_and_prepare self._download_and_prepare( File "/home/izaskr/anaconda3/envs/local_vae_older/lib/python3.8/site-packages/datasets/builder.py", line 671, in _download_and_prepare verify_splits(self.info.splits, split_dict) File "/home/izaskr/anaconda3/envs/local_vae_older/lib/python3.8/site-packages/datasets/utils/info_utils.py", line 74, in verify_splits raise NonMatchingSplitsSizesError(str(bad_splits)) datasets.utils.info_utils.NonMatchingSplitsSizesError: [{'expected': SplitInfo(name='train', num_bytes=610252351, num_examples=532812, dataset_name='blog_authorship_corpus'), 'recorded': SplitInfo(name='train', num_bytes=614706451, num_examples=535568, dataset_name='blog_authorship_corpus')}, {'expected': SplitInfo(name='validation', num_bytes=37500394, num_examples=31277, dataset_name='blog_authorship_corpus'), 'recorded': SplitInfo(name='validation', num_bytes=32553710, num_examples=28521, dataset_name='blog_authorship_corpus')}] ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.9.0 - Platform: Linux-4.15.0-132-generic-x86_64-with-glibc2.10 - Python version: 3.8.8 - PyArrow version: 4.0.1
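For readers hitting the same `NonMatchingSplitsSizesError`, a minimal sketch of the workaround suggested in the maintainer comments above, assuming the fix from PR #2685 has reached the master branch of `datasets`:

```python
# Sketch only: assumes PR #2685 has been merged to master.
# Install datasets from source first:
#   pip install git+https://github.com/huggingface/datasets.git
from datasets import load_dataset

raw_datasets = load_dataset("blog_authorship_corpus")
print(raw_datasets)
```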
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2679/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2679/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2678
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2678/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2678/comments
https://api.github.com/repos/huggingface/datasets/issues/2678/events
https://github.com/huggingface/datasets/issues/2678
948,471,222
MDU6SXNzdWU5NDg0NzEyMjI=
2,678
Import Error in Kaggle notebook
{ "login": "prikmm", "id": 47216475, "node_id": "MDQ6VXNlcjQ3MjE2NDc1", "avatar_url": "https://avatars.githubusercontent.com/u/47216475?v=4", "gravatar_id": "", "url": "https://api.github.com/users/prikmm", "html_url": "https://github.com/prikmm", "followers_url": "https://api.github.com/users/prikmm/followers", "following_url": "https://api.github.com/users/prikmm/following{/other_user}", "gists_url": "https://api.github.com/users/prikmm/gists{/gist_id}", "starred_url": "https://api.github.com/users/prikmm/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/prikmm/subscriptions", "organizations_url": "https://api.github.com/users/prikmm/orgs", "repos_url": "https://api.github.com/users/prikmm/repos", "events_url": "https://api.github.com/users/prikmm/events{/privacy}", "received_events_url": "https://api.github.com/users/prikmm/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "This looks like an issue with PyArrow. Did you try reinstalling it ?", "@lhoestq I did, and then let pip handle the installation in `pip import datasets`. I also tried using conda but it gives the same error.\r\n\r\nEdit: pyarrow version on kaggle is 4.0.0, it gets replaced with 4.0.1. So, I don't think uninstalling will change anything.\r\n```\r\nInstall Trace of datasets:\r\n\r\nCollecting datasets\r\n Downloading datasets-1.9.0-py3-none-any.whl (262 kB)\r\n |████████████████████████████████| 262 kB 834 kB/s eta 0:00:01\r\nRequirement already satisfied: dill in /opt/conda/lib/python3.7/site-packages (from datasets) (0.3.4)\r\nCollecting pyarrow!=4.0.0,>=1.0.0\r\n Downloading pyarrow-4.0.1-cp37-cp37m-manylinux2014_x86_64.whl (21.8 MB)\r\n |████████████████████████████████| 21.8 MB 6.2 MB/s eta 0:00:01\r\nRequirement already satisfied: importlib-metadata in /opt/conda/lib/python3.7/site-packages (from datasets) (3.4.0)\r\nRequirement already satisfied: huggingface-hub<0.1.0 in /opt/conda/lib/python3.7/site-packages (from datasets) (0.0.8)\r\nRequirement already satisfied: pandas in /opt/conda/lib/python3.7/site-packages (from datasets) (1.2.4)\r\nRequirement already satisfied: requests>=2.19.0 in /opt/conda/lib/python3.7/site-packages (from datasets) (2.25.1)\r\nRequirement already satisfied: fsspec>=2021.05.0 in /opt/conda/lib/python3.7/site-packages (from datasets) (2021.6.1)\r\nRequirement already satisfied: multiprocess in /opt/conda/lib/python3.7/site-packages (from datasets) (0.70.12.2)\r\nRequirement already satisfied: packaging in /opt/conda/lib/python3.7/site-packages (from datasets) (20.9)\r\nCollecting xxhash\r\n Downloading xxhash-2.0.2-cp37-cp37m-manylinux2010_x86_64.whl (243 kB)\r\n |████████████████████████████████| 243 kB 23.7 MB/s eta 0:00:01\r\nRequirement already satisfied: numpy>=1.17 in /opt/conda/lib/python3.7/site-packages (from datasets) (1.19.5)\r\nRequirement already satisfied: tqdm>=4.27 in /opt/conda/lib/python3.7/site-packages (from datasets) (4.61.1)\r\nRequirement already satisfied: filelock in /opt/conda/lib/python3.7/site-packages (from huggingface-hub<0.1.0->datasets) (3.0.12)\r\nRequirement already satisfied: urllib3<1.27,>=1.21.1 in /opt/conda/lib/python3.7/site-packages (from requests>=2.19.0->datasets) (1.26.5)\r\nRequirement already satisfied: idna<3,>=2.5 in /opt/conda/lib/python3.7/site-packages (from requests>=2.19.0->datasets) (2.10)\r\nRequirement already satisfied: certifi>=2017.4.17 in /opt/conda/lib/python3.7/site-packages (from requests>=2.19.0->datasets) (2021.5.30)\r\nRequirement already satisfied: chardet<5,>=3.0.2 in /opt/conda/lib/python3.7/site-packages (from requests>=2.19.0->datasets) (4.0.0)\r\nRequirement already satisfied: typing-extensions>=3.6.4 in /opt/conda/lib/python3.7/site-packages (from importlib-metadata->datasets) (3.7.4.3)\r\nRequirement already satisfied: zipp>=0.5 in /opt/conda/lib/python3.7/site-packages (from importlib-metadata->datasets) (3.4.1)\r\nRequirement already satisfied: pyparsing>=2.0.2 in /opt/conda/lib/python3.7/site-packages (from packaging->datasets) (2.4.7)\r\nRequirement already satisfied: python-dateutil>=2.7.3 in /opt/conda/lib/python3.7/site-packages (from pandas->datasets) (2.8.1)\r\nRequirement already satisfied: pytz>=2017.3 in /opt/conda/lib/python3.7/site-packages (from pandas->datasets) (2021.1)\r\nRequirement already satisfied: six>=1.5 in /opt/conda/lib/python3.7/site-packages (from python-dateutil>=2.7.3->pandas->datasets) (1.15.0)\r\nInstalling collected packages: xxhash, pyarrow, 
datasets\r\n Attempting uninstall: pyarrow\r\n Found existing installation: pyarrow 4.0.0\r\n Uninstalling pyarrow-4.0.0:\r\n Successfully uninstalled pyarrow-4.0.0\r\nSuccessfully installed datasets-1.9.0 pyarrow-4.0.1 xxhash-2.0.2\r\nWARNING: Running pip as root will break packages and permissions. You should install packages reliably by using venv: https://pip.pypa.io/warnings/venv\r\n```", "You may need to restart your kaggle notebook after installing a newer version of `pyarrow`.\r\n\r\nIf it doesn't work we'll probably have to create an issue on [arrow's JIRA](https://issues.apache.org/jira/projects/ARROW/issues/), and maybe ask kaggle why it could fail", "> You may need to restart your kaggle notebook after installing a newer version of `pyarrow`.\r\n> \r\n> If it doesn't work we'll probably have to create an issue on [arrow's JIRA](https://issues.apache.org/jira/projects/ARROW/issues/), and maybe ask kaggle why it could fail\r\n\r\nIt works after restarting.\r\nMy bad, I forgot to restart the notebook. Sorry for the trouble!" ]
1,626,773,318,000
1,626,875,966,000
1,626,872,582,000
NONE
null
## Describe the bug Not able to import datasets library in kaggle notebooks ## Steps to reproduce the bug ```python !pip install datasets import datasets ``` ## Expected results No such error ## Actual results ``` ImportError Traceback (most recent call last) <ipython-input-9-652e886d387f> in <module> ----> 1 import datasets /opt/conda/lib/python3.7/site-packages/datasets/__init__.py in <module> 31 ) 32 ---> 33 from .arrow_dataset import Dataset, concatenate_datasets 34 from .arrow_reader import ArrowReader, ReadInstruction 35 from .arrow_writer import ArrowWriter /opt/conda/lib/python3.7/site-packages/datasets/arrow_dataset.py in <module> 36 import pandas as pd 37 import pyarrow as pa ---> 38 import pyarrow.compute as pc 39 from multiprocess import Pool, RLock 40 from tqdm.auto import tqdm /opt/conda/lib/python3.7/site-packages/pyarrow/compute.py in <module> 16 # under the License. 17 ---> 18 from pyarrow._compute import ( # noqa 19 Function, 20 FunctionOptions, ImportError: /opt/conda/lib/python3.7/site-packages/pyarrow/_compute.cpython-37m-x86_64-linux-gnu.so: undefined symbol: _ZNK5arrow7compute15KernelSignature8ToStringEv ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.9.0 - Platform: Kaggle - Python version: 3.7.10 - PyArrow version: 4.0.1
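As the comments above note, the resolution was simply to restart the Kaggle notebook so that the freshly installed `pyarrow` wheel is loaded instead of the preinstalled one. A minimal sketch to verify the environment after the restart (the version numbers are the ones from this report, not guarantees):

```python
# Run in a fresh kernel after `pip install datasets` and a notebook restart.
import pyarrow
import datasets

# pyarrow should now be the newly installed build (4.0.1 in this report),
# not the 4.0.0 build that was loaded before the restart.
print(pyarrow.__version__)
print(datasets.__version__)
```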
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2678/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2678/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2677
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2677/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2677/comments
https://api.github.com/repos/huggingface/datasets/issues/2677/events
https://github.com/huggingface/datasets/issues/2677
948,429,788
MDU6SXNzdWU5NDg0Mjk3ODg=
2,677
Error when downloading C4
{ "login": "Aktsvigun", "id": 36672861, "node_id": "MDQ6VXNlcjM2NjcyODYx", "avatar_url": "https://avatars.githubusercontent.com/u/36672861?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Aktsvigun", "html_url": "https://github.com/Aktsvigun", "followers_url": "https://api.github.com/users/Aktsvigun/followers", "following_url": "https://api.github.com/users/Aktsvigun/following{/other_user}", "gists_url": "https://api.github.com/users/Aktsvigun/gists{/gist_id}", "starred_url": "https://api.github.com/users/Aktsvigun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Aktsvigun/subscriptions", "organizations_url": "https://api.github.com/users/Aktsvigun/orgs", "repos_url": "https://api.github.com/users/Aktsvigun/repos", "events_url": "https://api.github.com/users/Aktsvigun/events{/privacy}", "received_events_url": "https://api.github.com/users/Aktsvigun/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false } ]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Hi Thanks for reporting !\r\nIt looks like these files are not correctly reported in the list of expected files to download, let me fix that ;)", "Alright this is fixed now. We'll do a new release soon to make the fix available.\r\n\r\nIn the meantime feel free to simply pass `ignore_verifications=True` to `load_dataset` to skip this error", "@lhoestq thank you for such a quick feedback!" ]
1,626,770,250,000
1,626,792,091,000
1,626,791,890,000
NONE
null
Hi, I am trying to download the `en` corpus from the C4 dataset. However, I get an error caused by the validation files download (see image). My code is very primitive: `datasets.load_dataset('c4', 'en')` Is this a bug, or am I missing some configuration on my server? Thanks! <img width="1014" alt="Screenshot 2021-07-20 at 11 37 17" src="https://user-images.githubusercontent.com/36672861/126289448-6e0db402-5f3f-485a-bf74-eb6e0271fc25.png">
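Until the fixed checksum metadata was released, the workaround suggested in the maintainer comments above was to skip verification. A minimal sketch, assuming a `datasets` 1.9.x release where `load_dataset` accepts `ignore_verifications`:

```python
from datasets import load_dataset

# Skips the checksum/split-size verification that raised the error above.
# Note: the `en` config is very large (hundreds of GB).
c4 = load_dataset("c4", "en", ignore_verifications=True)
```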
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2677/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2677/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2676
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2676/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2676/comments
https://api.github.com/repos/huggingface/datasets/issues/2676/events
https://github.com/huggingface/datasets/pull/2676
947,734,909
MDExOlB1bGxSZXF1ZXN0NjkyNjc2NTg5
2,676
Increase json reader block_size automatically
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,706,274,000
1,626,717,099,000
1,626,717,098,000
MEMBER
null
Currently some files can't be read with the default parameters of the JSON lines reader. For example this one: https://huggingface.co/datasets/thomwolf/codeparrot/resolve/main/file-000000000006.json.gz raises a pyarrow error: ```python ArrowInvalid: straddling object straddles two block boundaries (try to increase block size?) ``` The block size that is used is pyarrow's default (related to this [jira issue](https://issues.apache.org/jira/browse/ARROW-9612)). To fix this issue I changed the block_size to increase automatically if there is a straddling issue when parsing a batch of json lines. By default the value is `chunksize // 32` in order to leverage multithreading, and it doubles every time a straddling issue occurs. The block_size is then reset for each file. cc @thomwolf @albertvillanova
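A minimal sketch of the retry strategy described above, not the exact implementation; it uses pyarrow's `read_json` and its `ReadOptions.block_size` knob, and the function name and default chunk size are illustrative:

```python
import io

import pyarrow as pa
import pyarrow.json as paj

def read_json_batch(batch: bytes, chunksize: int = 10 << 20) -> pa.Table:
    # Start with a small block size to keep multithreaded parsing effective,
    # then double it whenever pyarrow reports a straddling-object error.
    block_size = max(chunksize // 32, 16 << 10)
    while True:
        try:
            return paj.read_json(
                io.BytesIO(batch),
                read_options=paj.ReadOptions(block_size=block_size),
            )
        except pa.ArrowInvalid as e:
            if "straddling" in str(e):
                block_size *= 2  # retry with a larger block
            else:
                raise
```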
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2676/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2676/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2676", "html_url": "https://github.com/huggingface/datasets/pull/2676", "diff_url": "https://github.com/huggingface/datasets/pull/2676.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2676.patch", "merged_at": 1626717098000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2675
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2675/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2675/comments
https://api.github.com/repos/huggingface/datasets/issues/2675/events
https://github.com/huggingface/datasets/pull/2675
947,657,732
MDExOlB1bGxSZXF1ZXN0NjkyNjEwNTA1
2,675
Parallelize ETag requests
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,701,442,000
1,626,723,205,000
1,626,723,205,000
MEMBER
null
Since https://github.com/huggingface/datasets/pull/2628 we use the ETag of the remote data files to compute the directory in the cache where a dataset is saved. This is useful in order to reload the dataset from the cache only if the remote files haven't changed. In this PR I made the ETag requests parallel using multithreading. There is also a tqdm progress bar that shows up if there are more than 16 data files.
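A minimal sketch of the idea, not the exact implementation: since the requests are I/O-bound, the ETags can be fetched concurrently from a thread pool. The URL below is a stand-in for the remote data file URLs:

```python
from concurrent.futures import ThreadPoolExecutor

import requests

def get_etag(url: str) -> str:
    # A HEAD request is enough to read the ETag response header.
    response = requests.head(url, allow_redirects=True, timeout=10)
    return response.headers.get("ETag", "")

urls = [
    "https://raw.githubusercontent.com/huggingface/datasets/master/README.md",
]  # stand-in for the list of remote data file URLs

with ThreadPoolExecutor(max_workers=16) as pool:
    etags = list(pool.map(get_etag, urls))
```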
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2675/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2675/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2675", "html_url": "https://github.com/huggingface/datasets/pull/2675", "diff_url": "https://github.com/huggingface/datasets/pull/2675.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2675.patch", "merged_at": 1626723205000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2674
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2674/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2674/comments
https://api.github.com/repos/huggingface/datasets/issues/2674/events
https://github.com/huggingface/datasets/pull/2674
947,338,202
MDExOlB1bGxSZXF1ZXN0NjkyMzMzODU3
2,674
Fix sacrebleu parameter name
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,678,446,000
1,626,682,023,000
1,626,682,023,000
MEMBER
null
DONE: - Fix parameter name: `smooth` to `smooth_method`. - Improve kwargs description. - Align docs on using a metric. - Add an example of passing additional arguments when using metrics. Related to #2669.
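As a usage note, after this fix metric-specific arguments such as the renamed `smooth_method` are passed to `compute` (not to `load_metric`). A minimal sketch, assuming the `sacrebleu` package is installed:

```python
import datasets

sacrebleu = datasets.load_metric("sacrebleu")
predictions = ["hello there general kenobi"]
references = [["hello there general kenobi"]]
# `smooth_method` (formerly documented as `smooth`) is forwarded to sacrebleu.
results = sacrebleu.compute(
    predictions=predictions, references=references, smooth_method="exp"
)
print(results["score"])
```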
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2674/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2674/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2674", "html_url": "https://github.com/huggingface/datasets/pull/2674", "diff_url": "https://github.com/huggingface/datasets/pull/2674.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2674.patch", "merged_at": 1626682023000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2673
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2673/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2673/comments
https://api.github.com/repos/huggingface/datasets/issues/2673/events
https://github.com/huggingface/datasets/pull/2673
947,300,008
MDExOlB1bGxSZXF1ZXN0NjkyMzAxMTgw
2,673
Fix potential DuplicatedKeysError in SQuAD
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,674,880,000
1,626,678,483,000
1,626,678,483,000
MEMBER
null
DONE: - Fix potential DuplicatedKeysError by ensuring keys are unique. - Align examples in the docs with the SQuAD code. We should promote, as a good practice, that keys should be programmatically generated as unique, instead of read from the data (which might not be unique).
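A minimal sketch of the recommended pattern, using a running counter as the key instead of an ID read from the data; the field names follow SQuAD's JSON layout and the surrounding builder code is omitted:

```python
import json

def generate_examples(filepath):
    # Keys are generated programmatically, so they are unique by construction,
    # even if the `id` field in the raw data contains duplicates.
    key = 0
    with open(filepath, encoding="utf-8") as f:
        squad = json.load(f)
    for article in squad["data"]:
        for paragraph in article["paragraphs"]:
            for qa in paragraph["qas"]:
                yield key, {"id": qa["id"], "question": qa["question"]}
                key += 1
```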
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2673/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2673/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2673", "html_url": "https://github.com/huggingface/datasets/pull/2673", "diff_url": "https://github.com/huggingface/datasets/pull/2673.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2673.patch", "merged_at": 1626678483000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2672
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2672/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2672/comments
https://api.github.com/repos/huggingface/datasets/issues/2672/events
https://github.com/huggingface/datasets/pull/2672
947,294,605
MDExOlB1bGxSZXF1ZXN0NjkyMjk2NDQ4
2,672
Fix potential DuplicatedKeysError in LibriSpeech
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,674,449,000
1,626,676,137,000
1,626,676,136,000
MEMBER
null
DONE: - Fix unnecessary path join. - Fix potential DuplicatedKeysError by ensuring keys are unique. We should promote, as a good practice, that keys should be programmatically generated as unique, instead of read from the data (which might not be unique).
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2672/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2672/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2672", "html_url": "https://github.com/huggingface/datasets/pull/2672", "diff_url": "https://github.com/huggingface/datasets/pull/2672.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2672.patch", "merged_at": 1626676136000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2671
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2671/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2671/comments
https://api.github.com/repos/huggingface/datasets/issues/2671/events
https://github.com/huggingface/datasets/pull/2671
947,273,875
MDExOlB1bGxSZXF1ZXN0NjkyMjc5MTM0
2,671
Mesinesp development and training data sets have been added.
{ "login": "aslihanuysall", "id": 32900185, "node_id": "MDQ6VXNlcjMyOTAwMTg1", "avatar_url": "https://avatars.githubusercontent.com/u/32900185?v=4", "gravatar_id": "", "url": "https://api.github.com/users/aslihanuysall", "html_url": "https://github.com/aslihanuysall", "followers_url": "https://api.github.com/users/aslihanuysall/followers", "following_url": "https://api.github.com/users/aslihanuysall/following{/other_user}", "gists_url": "https://api.github.com/users/aslihanuysall/gists{/gist_id}", "starred_url": "https://api.github.com/users/aslihanuysall/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/aslihanuysall/subscriptions", "organizations_url": "https://api.github.com/users/aslihanuysall/orgs", "repos_url": "https://api.github.com/users/aslihanuysall/repos", "events_url": "https://api.github.com/users/aslihanuysall/events{/privacy}", "received_events_url": "https://api.github.com/users/aslihanuysall/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "It'll be new pull request with new commits." ]
1,626,671,678,000
1,626,679,948,000
1,626,677,150,000
NONE
null
Mesinesp (https://zenodo.org/search?page=1&size=20&q=mesinesp) contains semantically indexed medical records in Spanish. Indexing is done using DeCS codes, a sort of Spanish equivalent to MeSH terms. The Mesinesp development set (Spanish BioASQ track, see https://temu.bsc.es/mesinesp) has a total of 750 records, and the training set has a total of 369,368 records.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2671/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2671/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2671", "html_url": "https://github.com/huggingface/datasets/pull/2671", "diff_url": "https://github.com/huggingface/datasets/pull/2671.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2671.patch", "merged_at": null }
true
https://api.github.com/repos/huggingface/datasets/issues/2670
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2670/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2670/comments
https://api.github.com/repos/huggingface/datasets/issues/2670/events
https://github.com/huggingface/datasets/issues/2670
947,120,709
MDU6SXNzdWU5NDcxMjA3MDk=
2,670
Using sharding to parallelize indexing
{ "login": "ggdupont", "id": 5583410, "node_id": "MDQ6VXNlcjU1ODM0MTA=", "avatar_url": "https://avatars.githubusercontent.com/u/5583410?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ggdupont", "html_url": "https://github.com/ggdupont", "followers_url": "https://api.github.com/users/ggdupont/followers", "following_url": "https://api.github.com/users/ggdupont/following{/other_user}", "gists_url": "https://api.github.com/users/ggdupont/gists{/gist_id}", "starred_url": "https://api.github.com/users/ggdupont/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ggdupont/subscriptions", "organizations_url": "https://api.github.com/users/ggdupont/orgs", "repos_url": "https://api.github.com/users/ggdupont/repos", "events_url": "https://api.github.com/users/ggdupont/events{/privacy}", "received_events_url": "https://api.github.com/users/ggdupont/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,643,586,000
1,633,613,605,000
null
CONTRIBUTOR
null
**Is your feature request related to a problem? Please describe.** Creating an Elasticsearch index on a large dataset can take quite long and cannot be parallelized across shards (the index creations collide). **Describe the solution you'd like** When working on dataset shards, if an index already exists, its mapping should be checked and, if compatible, the indexing process should continue with the shard data. Additionally, at the end of the process, the `_indexes` dict should be sent back to the original dataset object (from which the shards have been created) to allow using the index for later filtering on the whole dataset (a sketch of the intended usage follows below). **Describe alternatives you've considered** Each dataset shard could create independent partial indices. Then, at the whole-dataset level, all indices should be referenced in the `_indexes` dict and used in querying through `get_nearest_examples()`. The drawback is that the scores will be computed independently on the partial indices, leading to inconsistent values for most scoring based on corpus-level statistics (tf-idf, BM25). **Additional context** The objective is to parallelize the index creation to speed up the process (i.e. putting more load on the ES server, which is fine since it can handle a large load) while later enabling search on the whole dataset.
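A minimal sketch of the intended usage under the proposed feature; it assumes an Elasticsearch server at localhost:9200, the `elasticsearch` client installed, and, crucially, the proposed behavior that `add_elasticsearch_index` tolerates an already existing index (today these calls collide):

```python
from concurrent.futures import ThreadPoolExecutor

from datasets import load_dataset

ds = load_dataset("squad", split="validation")
num_shards = 4

def index_shard(i: int) -> None:
    shard = ds.shard(num_shards=num_shards, index=i, contiguous=True)
    # Under the proposed feature, this would check the existing index's
    # mapping and keep indexing, instead of failing on the second shard.
    shard.add_elasticsearch_index(
        "context", host="localhost", port=9200, es_index_name="squad_context"
    )

with ThreadPoolExecutor(max_workers=num_shards) as pool:
    list(pool.map(index_shard, range(num_shards)))
```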
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2670/reactions", "total_count": 3, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 2, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2670/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2669
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2669/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2669/comments
https://api.github.com/repos/huggingface/datasets/issues/2669/events
https://github.com/huggingface/datasets/issues/2669
946,982,998
MDU6SXNzdWU5NDY5ODI5OTg=
2,669
Metric kwargs are not passed to underlying external metric f1_score
{ "login": "BramVanroy", "id": 2779410, "node_id": "MDQ6VXNlcjI3Nzk0MTA=", "avatar_url": "https://avatars.githubusercontent.com/u/2779410?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BramVanroy", "html_url": "https://github.com/BramVanroy", "followers_url": "https://api.github.com/users/BramVanroy/followers", "following_url": "https://api.github.com/users/BramVanroy/following{/other_user}", "gists_url": "https://api.github.com/users/BramVanroy/gists{/gist_id}", "starred_url": "https://api.github.com/users/BramVanroy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BramVanroy/subscriptions", "organizations_url": "https://api.github.com/users/BramVanroy/orgs", "repos_url": "https://api.github.com/users/BramVanroy/repos", "events_url": "https://api.github.com/users/BramVanroy/events{/privacy}", "received_events_url": "https://api.github.com/users/BramVanroy/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Hi @BramVanroy, thanks for reporting.\r\n\r\nFirst, note that `\"min\"` is not an allowed value for `average`. According to scikit-learn [documentation](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html), `average` can only take the values: `{\"micro\", \"macro\", \"samples\", \"weighted\", \"binary\"} or None, default=\"binary\"`.\r\n\r\nSecond, you should take into account that all additional metric-specific argument should be passed in the method `compute` (and not in the method `load_metric`). You can find more information in our documentation: https://huggingface.co/docs/datasets/using_metrics.html#computing-the-metric-scores\r\n\r\nSo for example, if you would like to calculate the macro-averaged F1 score, you should use:\r\n```python\r\nimport datasets\r\n\r\nf1 = datasets.load_metric(\"f1\", keep_in_memory=True)\r\nf1.add_batch(predictions=[0,2,3], references=[1, 2, 3])\r\nf1.compute(average=\"macro\")\r\n```", "Thanks, that was it. A bit strange though, since `load_metric` had an argument `metric_init_kwargs`. I assume that that's for specific initialisation arguments whereas `average` is for the function itself." ]
1,626,597,151,000
1,626,633,365,000
1,626,607,144,000
CONTRIBUTOR
null
## Describe the bug When I want to use F1 score with average="min", this keyword argument does not seem to be passed through to the underlying sklearn metric. This is evident because [sklearn](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html) throws an error telling me so. ## Steps to reproduce the bug ```python import datasets f1 = datasets.load_metric("f1", keep_in_memory=True, average="min") f1.add_batch(predictions=[0,2,3], references=[1, 2, 3]) f1.compute() ``` ## Expected results No error, because `average="min"` should be passed correctly to f1_score in sklearn. ## Actual results ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> File "C:\Users\bramv\.virtualenvs\pipeline-TpEsXVex\lib\site-packages\datasets\metric.py", line 402, in compute output = self._compute(predictions=predictions, references=references, **kwargs) File "C:\Users\bramv\.cache\huggingface\modules\datasets_modules\metrics\f1\82177930a325d4c28342bba0f116d73f6d92fb0c44cd67be32a07c1262b61cfe\f1.py", line 97, in _compute "f1": f1_score( File "C:\Users\bramv\.virtualenvs\pipeline-TpEsXVex\lib\site-packages\sklearn\utils\validation.py", line 63, in inner_f return f(*args, **kwargs) File "C:\Users\bramv\.virtualenvs\pipeline-TpEsXVex\lib\site-packages\sklearn\metrics\_classification.py", line 1071, in f1_score return fbeta_score(y_true, y_pred, beta=1, labels=labels, File "C:\Users\bramv\.virtualenvs\pipeline-TpEsXVex\lib\site-packages\sklearn\utils\validation.py", line 63, in inner_f return f(*args, **kwargs) File "C:\Users\bramv\.virtualenvs\pipeline-TpEsXVex\lib\site-packages\sklearn\metrics\_classification.py", line 1195, in fbeta_score _, _, f, _ = precision_recall_fscore_support(y_true, y_pred, File "C:\Users\bramv\.virtualenvs\pipeline-TpEsXVex\lib\site-packages\sklearn\utils\validation.py", line 63, in inner_f return f(*args, **kwargs) File "C:\Users\bramv\.virtualenvs\pipeline-TpEsXVex\lib\site-packages\sklearn\metrics\_classification.py", line 1464, in precision_recall_fscore_support labels = _check_set_wise_labels(y_true, y_pred, average, labels, File "C:\Users\bramv\.virtualenvs\pipeline-TpEsXVex\lib\site-packages\sklearn\metrics\_classification.py", line 1294, in _check_set_wise_labels raise ValueError("Target is %s but average='binary'. Please " ValueError: Target is multiclass but average='binary'. Please choose another average setting, one of [None, 'micro', 'macro', 'weighted']. ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.9.0 - Platform: Windows-10-10.0.19041-SP0 - Python version: 3.9.2 - PyArrow version: 4.0.1
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2669/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2669/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2668
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2668/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2668/comments
https://api.github.com/repos/huggingface/datasets/issues/2668/events
https://github.com/huggingface/datasets/pull/2668
946,867,622
MDExOlB1bGxSZXF1ZXN0NjkxOTY1MTY1
2,668
Add Russian SuperGLUE
{ "login": "slowwavesleep", "id": 44175589, "node_id": "MDQ6VXNlcjQ0MTc1NTg5", "avatar_url": "https://avatars.githubusercontent.com/u/44175589?v=4", "gravatar_id": "", "url": "https://api.github.com/users/slowwavesleep", "html_url": "https://github.com/slowwavesleep", "followers_url": "https://api.github.com/users/slowwavesleep/followers", "following_url": "https://api.github.com/users/slowwavesleep/following{/other_user}", "gists_url": "https://api.github.com/users/slowwavesleep/gists{/gist_id}", "starred_url": "https://api.github.com/users/slowwavesleep/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/slowwavesleep/subscriptions", "organizations_url": "https://api.github.com/users/slowwavesleep/orgs", "repos_url": "https://api.github.com/users/slowwavesleep/repos", "events_url": "https://api.github.com/users/slowwavesleep/events{/privacy}", "received_events_url": "https://api.github.com/users/slowwavesleep/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Added the missing label classes and their explanations (to the best of my understanding)", "Thanks a lot ! Once the last comment about the label names is addressed we can merge :)" ]
1,626,543,688,000
1,627,559,431,000
1,627,559,431,000
CONTRIBUTOR
null
Hi, This adds the [Russian SuperGLUE](https://russiansuperglue.com/) dataset. For the most part I reused the code for the original SuperGLUE, although there are some relatively minor differences in the structure that I accounted for.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2668/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2668/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2668", "html_url": "https://github.com/huggingface/datasets/pull/2668", "diff_url": "https://github.com/huggingface/datasets/pull/2668.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2668.patch", "merged_at": 1627559430000 }
true
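As a hedged illustration of what this PR enables once merged: loading one of the Russian SuperGLUE tasks should follow the same pattern as the original SuperGLUE loader. The config name "rcb" (Russian Commitment Bank) is an assumption here; the dataset card lists the full set of tasks.

```python
from datasets import load_dataset

# "rcb" is assumed to be one of the configs exposed by the new loading script;
# consult the dataset card for the actual list of Russian SuperGLUE tasks.
rcb = load_dataset("russian_super_glue", "rcb", split="train")
print(rcb[0])
```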
https://api.github.com/repos/huggingface/datasets/issues/2667
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2667/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2667/comments
https://api.github.com/repos/huggingface/datasets/issues/2667/events
https://github.com/huggingface/datasets/pull/2667
946,861,908
MDExOlB1bGxSZXF1ZXN0NjkxOTYwNzc3
2,667
Use tqdm from tqdm_utils
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "The current CI failure is due to modifications in the dataset script.", "Merging since the CI is only failing because of dataset card issues, which is unrelated to this PR" ]
1,626,541,595,000
1,626,716,350,000
1,626,715,920,000
CONTRIBUTOR
null
This PR replaces `tqdm` from the `tqdm` lib with `tqdm` from `datasets.utils.tqdm_utils`. With this change, it's possible to disable progress bars just by calling `disable_progress_bar`. Note this doesn't work on Windows when using multiprocessing due to how global variables are shared between processes. Currently, there is no easy way to disable progress bars in a multiprocess setting on Windows (patching logging with `datasets.utils.logging.get_verbosity = lambda: datasets.utils.logging.NOTSET` doesn't seem to work either), so adding support for this is a future goal. Additionally, this PR adds a unit ("ba" for batches) to the bar printed by `Dataset.to_json` (this change is motivated by https://github.com/huggingface/datasets/issues/2657).
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2667/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2667/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2667", "html_url": "https://github.com/huggingface/datasets/pull/2667", "diff_url": "https://github.com/huggingface/datasets/pull/2667.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2667.patch", "merged_at": 1626715920000 }
true
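A minimal sketch of the usage this PR describes, assuming the helper lives at the module path given in the PR title (`datasets.utils.tqdm_utils`):

```python
from datasets import load_dataset
from datasets.utils.tqdm_utils import disable_progress_bar  # import path assumed from the PR

disable_progress_bar()  # silences datasets progress bars in this process
ds = load_dataset("imdb", split="train")
ds = ds.map(lambda x: {"n_chars": len(x["text"])})  # runs without a tqdm bar
```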
https://api.github.com/repos/huggingface/datasets/issues/2666
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2666/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2666/comments
https://api.github.com/repos/huggingface/datasets/issues/2666/events
https://github.com/huggingface/datasets/pull/2666
946,825,140
MDExOlB1bGxSZXF1ZXN0NjkxOTMzMDM1
2,666
Adds CodeClippy dataset [WIP]
{ "login": "arampacha", "id": 69807323, "node_id": "MDQ6VXNlcjY5ODA3MzIz", "avatar_url": "https://avatars.githubusercontent.com/u/69807323?v=4", "gravatar_id": "", "url": "https://api.github.com/users/arampacha", "html_url": "https://github.com/arampacha", "followers_url": "https://api.github.com/users/arampacha/followers", "following_url": "https://api.github.com/users/arampacha/following{/other_user}", "gists_url": "https://api.github.com/users/arampacha/gists{/gist_id}", "starred_url": "https://api.github.com/users/arampacha/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/arampacha/subscriptions", "organizations_url": "https://api.github.com/users/arampacha/orgs", "repos_url": "https://api.github.com/users/arampacha/repos", "events_url": "https://api.github.com/users/arampacha/events{/privacy}", "received_events_url": "https://api.github.com/users/arampacha/received_events", "type": "User", "site_admin": false }
[]
open
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,528,724,000
1,626,685,794,000
null
NONE
null
CodeClippy is an open-source code dataset scraped from GitHub during the Flax/JAX community week: https://the-eye.eu/public/AI/training_data/code_clippy_data/
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2666/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2666/timeline
null
true
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2666", "html_url": "https://github.com/huggingface/datasets/pull/2666", "diff_url": "https://github.com/huggingface/datasets/pull/2666.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2666.patch", "merged_at": null }
true
https://api.github.com/repos/huggingface/datasets/issues/2665
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2665/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2665/comments
https://api.github.com/repos/huggingface/datasets/issues/2665/events
https://github.com/huggingface/datasets/pull/2665
946,822,036
MDExOlB1bGxSZXF1ZXN0NjkxOTMwNjky
2,665
Adds APPS dataset to the hub [WIP]
{ "login": "arampacha", "id": 69807323, "node_id": "MDQ6VXNlcjY5ODA3MzIz", "avatar_url": "https://avatars.githubusercontent.com/u/69807323?v=4", "gravatar_id": "", "url": "https://api.github.com/users/arampacha", "html_url": "https://github.com/arampacha", "followers_url": "https://api.github.com/users/arampacha/followers", "following_url": "https://api.github.com/users/arampacha/following{/other_user}", "gists_url": "https://api.github.com/users/arampacha/gists{/gist_id}", "starred_url": "https://api.github.com/users/arampacha/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/arampacha/subscriptions", "organizations_url": "https://api.github.com/users/arampacha/orgs", "repos_url": "https://api.github.com/users/arampacha/repos", "events_url": "https://api.github.com/users/arampacha/events{/privacy}", "received_events_url": "https://api.github.com/users/arampacha/received_events", "type": "User", "site_admin": false }
[]
open
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,527,597,000
1,626,544,607,000
null
NONE
null
A loading script for [APPS dataset](https://github.com/hendrycks/apps)
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2665/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 1, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2665/timeline
null
true
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2665", "html_url": "https://github.com/huggingface/datasets/pull/2665", "diff_url": "https://github.com/huggingface/datasets/pull/2665.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2665.patch", "merged_at": null }
true
https://api.github.com/repos/huggingface/datasets/issues/2663
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2663/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2663/comments
https://api.github.com/repos/huggingface/datasets/issues/2663/events
https://github.com/huggingface/datasets/issues/2663
946,552,273
MDU6SXNzdWU5NDY1NTIyNzM=
2,663
[`to_json`] add multi-proc sharding support
{ "login": "stas00", "id": 10676103, "node_id": "MDQ6VXNlcjEwNjc2MTAz", "avatar_url": "https://avatars.githubusercontent.com/u/10676103?v=4", "gravatar_id": "", "url": "https://api.github.com/users/stas00", "html_url": "https://github.com/stas00", "followers_url": "https://api.github.com/users/stas00/followers", "following_url": "https://api.github.com/users/stas00/following{/other_user}", "gists_url": "https://api.github.com/users/stas00/gists{/gist_id}", "starred_url": "https://api.github.com/users/stas00/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stas00/subscriptions", "organizations_url": "https://api.github.com/users/stas00/orgs", "repos_url": "https://api.github.com/users/stas00/repos", "events_url": "https://api.github.com/users/stas00/events{/privacy}", "received_events_url": "https://api.github.com/users/stas00/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Hi @stas00, \r\nI want to work on this issue and I was thinking why don't we use `imap` [in this loop](https://github.com/huggingface/datasets/blob/440b14d0dd428ae1b25881aa72ba7bbb8ad9ff84/src/datasets/io/json.py#L99)? This way, using offset (which is being used to slice the pyarrow table) we can convert pyarrow table to `json` using multiprocessing. I've a small code snippet for some clarity:\r\n```\r\nresult = list(\r\n pool.imap(self._apply_df, [(offset, batch_size) for offset in range(0, len(self.dataset), batch_size)])\r\n )\r\n```\r\n`_apply_df` is a function which will return `batch.to_pandas().to_json(path_or_buf=None, orient=\"records\", lines=True)` which is basically json version of the batched pyarrow table. Later on we can concatenate it to form json file? \r\n\r\nI think the only downside here is to write file from `imap` output (output would be a list and we'll need to iterate over it and write in a file) which might add a little overhead cost. What do you think about this?", "Followed up in https://github.com/huggingface/datasets/pull/2747" ]
1,626,464,510,000
1,631,541,397,000
1,631,541,397,000
MEMBER
null
As discussed on slack it appears that `to_json` is quite slow on huge datasets like OSCAR. I implemented sharded saving, which is much, much faster - but the tqdm bars all overwrite each other, so it's hard to make sense of the progress, so if possible ideally this multi-proc support could be implemented internally in `to_json` via a `num_proc` argument. I guess `num_proc` will be the number of shards? I think the user will need to use this feature wisely, since too many processes writing to, say, a normal spinning HD is likely to be slower than one process. I'm not sure whether the user or `datasets` should be responsible for concatenating the shards at the end; either way works for my needs. The code I was using: ``` from multiprocessing import cpu_count, Process, Queue [...] filtered_dataset = concat_dataset.map(filter_short_documents, batched=True, batch_size=256, num_proc=cpu_count()) DATASET_NAME = "oscar" SHARDS = 10 def process_shard(idx): print(f"Sharding {idx}") ds_shard = filtered_dataset.shard(SHARDS, idx, contiguous=True) # ds_shard = ds_shard.shuffle() # remove contiguous=True above if shuffling print(f"Saving {DATASET_NAME}-{idx}.jsonl") ds_shard.to_json(f"{DATASET_NAME}-{idx}.jsonl", orient="records", lines=True, force_ascii=False) queue = Queue() processes = [Process(target=process_shard, args=(idx,)) for idx in range(SHARDS)] for p in processes: p.start() for p in processes: p.join() ``` Thank you! @lhoestq
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2663/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2663/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
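A sketch of the interface this issue asks for, as eventually followed up in PR #2747. Note that `num_proc` here is the proposed argument, not one that existed in `to_json` at the time of the report:

```python
# Proposed interface: each process writes its shard of the JSON lines file in
# parallel, replacing the manual multiprocessing.Process boilerplate above.
filtered_dataset.to_json(
    "oscar.jsonl", orient="records", lines=True, force_ascii=False, num_proc=8
)
```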
https://api.github.com/repos/huggingface/datasets/issues/2662
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2662/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2662/comments
https://api.github.com/repos/huggingface/datasets/issues/2662/events
https://github.com/huggingface/datasets/pull/2662
946,470,815
MDExOlB1bGxSZXF1ZXN0NjkxNjM5MjU5
2,662
Load Dataset from the Hub (NO DATASET SCRIPT)
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "This is ready for review now :)\r\n\r\nI would love to have some feedback on the changes in load.py @albertvillanova. There are many changes so if you have questions let me know, especially on the `resolve_data_files` functions and on the changes in `prepare_module`.\r\n\r\nAnd @thomwolf if you want to take a look at the documentation, feel free to share your suggestions :)", "I took your comments into account thanks !\r\nAnd I made `aiohttp` a required dependency :)", "Just updated the documentation :)\r\n[share_datasets.html](https://45532-250213286-gh.circle-artifacts.com/0/docs/_build/html/share_dataset.html)\r\n\r\nLet me know if you have some comments", "Merging this one :) \r\n\r\nWe can try to integrate the changes in the docs to #2718 @stevhliu !", "Baked this into the [docs](https://44335-250213286-gh.circle-artifacts.com/0/docs/_build/html/loading.html#hugging-face-hub) already, let me know if there is anything else I should add! :)" ]
1,626,456,118,000
1,629,903,181,000
1,629,901,088,000
MEMBER
null
## Load the data from any Dataset repository on the Hub This PR adds support for loading datasets from any dataset repository on the hub, without requiring any dataset script. As a user it's now possible to create a repo and upload some csv/json/text/parquet files, and then be able to load the data in one line. Here is an example with the `allenai/c4` repository that contains a lot of compressed json lines files: ```python from datasets import load_dataset data_files = {"train": "en/c4-train.*.json.gz"} c4 = load_dataset("allenai/c4", data_files=data_files, split="train", streaming=True) print(c4.n_shards) # 1024 print(next(iter(c4))) # {'text': 'Beginners BBQ Class Takin...'} ``` By default it loads all the files, but as shown in the example you can choose the ones you want with unix style patterns. Of course it's still possible to use dataset scripts since they offer the most flexibility. ## Implementation details It uses `huggingface_hub` to list the files in a dataset repository. If you provide a path to a local directory instead of a repository name, it works the same way but it uses `glob`. Depending on the data files available, or passed in the `data_files` parameter, one of the available builders will be used among the csv, json, text and parquet builders. Because of this, it's not possible to load both csv and json files at once. In this case you have to load them separately and then concatenate the two datasets for example. ## TODO - [x] tests - [x] docs - [x] when huggingface_hub gets a new release, update the CI and the setup.py Close https://github.com/huggingface/datasets/issues/2629
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2662/reactions", "total_count": 5, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 5, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2662/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2662", "html_url": "https://github.com/huggingface/datasets/pull/2662", "diff_url": "https://github.com/huggingface/datasets/pull/2662.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2662.patch", "merged_at": 1629901088000 }
true
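To complement the `allenai/c4` example in the PR description, here is a hedged sketch of the simplest case the PR enables: a script-less repository of raw files. The repository name is hypothetical; per the description, a local directory path resolves the same way via glob.

```python
from datasets import load_dataset

# "username/my_csv_repo" is a hypothetical Hub repo containing only CSV files
# and no dataset script; the csv builder should be selected automatically.
ds = load_dataset("username/my_csv_repo", split="train")

# A local directory of raw data files is resolved the same way, using glob:
local_ds = load_dataset("path/to/local_dir", split="train")
```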
https://api.github.com/repos/huggingface/datasets/issues/2661
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2661/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2661/comments
https://api.github.com/repos/huggingface/datasets/issues/2661/events
https://github.com/huggingface/datasets/pull/2661
946,446,967
MDExOlB1bGxSZXF1ZXN0NjkxNjE5MzAz
2,661
Add SD task for SUPERB
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "I make a summary about our discussion with @lewtun and @Narsil on the agreed schema for this dataset and the additional steps required to generate the 2D array labels:\r\n- The labels for this dataset are a 2D array:\r\n Given an example:\r\n ```python\r\n {\"record_id\": record_id, \"file\": file, \"start\": start, \"end\": end, \"speakers\": [...]}\r\n ```\r\n the labels are a 2D array of shape `(num_frames, num_speakers)` where `num_frames = end - start` and `num_speakers = 2`.\r\n- In order to avoid a too large dataset (too large disk space), `datasets` does not store the 2D array label. Instead, we store a compact form:\r\n ```\r\n \"speakers\": [\r\n {\"speaker_id\": speaker_0_id, \"start\": start_0_speaker_0, \"end\": end_0_speaker_0},\r\n {\"speaker_id\": speaker_0_id, \"start\": start_1_speaker_0, \"end\": end_1_speaker_0},\r\n {\"speaker_id\": speaker_1_id, \"start\": start_0_speaker_1, \"end\": end_0_speaker_1},\r\n ],\r\n ```\r\n - Once loaded the dataset, an additional step is required to generate the 2D array label from this compact form\r\n - This additional step should be a modified version of the s3prl method `_get_labeled_speech`:\r\n - Original s3prl `_get_labeled_speech` includes 2 functionalities: reading the audio file and transforming it into an array, and generating the label 2D array; I think we should separate these 2 functionalities\r\n - Original s3prl `_get_labeled_speech` performs 2 steps to generate the labels:\r\n - Transform start/end seconds (float) into frame numbers (int): I have already done this step to generate the dataset\r\n - Generate the 2D array label from the frame numbers\r\n\r\nI also ping @osanseviero and @lhoestq to include them in the loop.", "Here I would like to discuss (and agree) one of the decisions I made, as I'm not completely satisfied with it: to transform the seconds (float) into frame numbers (int) to generate this dataset.\r\n\r\n- A priori, the most natural and general choice would be to preserve the seconds (float), because:\r\n - this is the way the raw data comes from\r\n - the transformation into frame numbers depends on the sample rate, frame_shift and subsampling\r\n\r\nHowever, I finally decided to transform seconds into frame numbers because:\r\n- for SUPERB, sampling rate, frame_shift and subsampling are fixed (`rate = 16_000`, `frame_shift = 160`, `subsampling = 1`)\r\n- it makes easier the post-processing, as labels are generated from sample numbers: labels are a 2D array of shape `(num_frames, num_speakers)`\r\n- the number of examples depends on the number of frames:\r\n - if an example has more than 2_000 frames, then it is split into 2 examples. This is the case for `record_id = \"7859-102521-0017_3983-5371-0014\"`, which has 2_452 frames and it is split into 2 examples:\r\n ```\r\n {\"record_id\": \"7859-102521-0017_3983-5371-0014\", \"start\"= 0, \"end\": 2_000,...},\r\n {\"record_id\": \"7859-102521-0017_3983-5371-0014\", \"start\"= 2_000, \"end\": 2_452,...},\r\n ```\r\n\r\nAs I told you, I'm not totally convinced of this decision, and I would really appreciate your opinion.\r\n\r\ncc: @lewtun @Narsil @osanseviero @lhoestq ", "It makes total sense to prepare the data to be in a format that can actually be used for model training and evaluation. That's one of the roles of this lib :)\r\n\r\nSo for me it's ok to use frames as a unit instead of seconds. Just pinging @patrickvonplaten in case he has ever played with such audio tasks and has some advice. For the context: the task is to classify which speaker is speaking, let us know if you are aware of any convenient/standard format for this.\r\n\r\nAlso I'm not sure why you have to split an example if it's longer that 2,000 frames ?", "> Also I'm not sure why you have to split an example if it's longer that 2,000 frames ?\r\n\r\nIt is a convention in SUPERB benchmark.", "Note that if we agree to leave the dataset as it is now, 2 additional custom functions must be used:\r\n- one to generate the 2D array labels\r\n- one to load the audio file into an array, but taking into account start/end to cut the audio\r\n\r\nIs there a way we can give these functions ready to be used? Or should we leave this entirely to the end user? This is not trivial...", "You could add an example of usage in the dataset card, as it is done for other audio datasets", "@albertvillanova this simple function can be edited simply to add the start/stop cuts \r\n\r\nhttps://github.com/huggingface/transformers/blob/master/src/transformers/pipelines/automatic_speech_recognition.py#L29 ", "Does this function work on windows ?", "Windows ? What is it ? (Not sure not able to test, it's directly calling ffmpeg binary, so depending on the setup it could but can't say for sure without testing)\r\n", "It's one of the OS we're supposed to support :P (for the better and for the worse)", "> Note that if we agree to leave the dataset as it is now, 2 additional custom functions must be used:\r\n> \r\n> * one to generate the 2D array labels\r\n> * one to load the audio file into an array, but taking into account start/end to cut the audio\r\n> \r\n> Is there a way we can give these functions ready to be used? Or should we leave this entirely to the end user? This is not trivial...\r\n\r\n+1 on providing the necessary functions on the dataset card. aside from that, the current implementation looks great from my perspective!" ]
1,626,453,801,000
1,628,096,633,000
1,628,096,633,000
MEMBER
null
Include the SD (Speaker Diarization) task as described in the [SUPERB paper](https://arxiv.org/abs/2105.01051) and `s3prl` [instructions](https://github.com/s3prl/s3prl/tree/master/s3prl/downstream#sd-speaker-diarization). TODO: - [x] Generate the LibriMix corpus - [x] Prepare the corpus for diarization - [x] Upload these files to the superb-data repo - [x] Transcribe the corresponding s3prl processing of these files into our superb loading script - [x] README: tags + description sections - ~~Add DER metric~~ (we leave the DER metric for a follow-up PR) Related to #2619. Close #2653. cc: @lewtun
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2661/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2661/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2661", "html_url": "https://github.com/huggingface/datasets/pull/2661", "diff_url": "https://github.com/huggingface/datasets/pull/2661.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2661.patch", "merged_at": 1628096632000 }
true
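The PR thread above leaves the expansion of the compact `speakers` form into the `(num_frames, num_speakers)` label array to the end user. The following is a minimal sketch of that step, not the s3prl implementation; in particular, it assumes segment `start`/`end` are frame numbers relative to the example window, which is an interpretation of the thread rather than something the PR states.

```python
import numpy as np

def frame_labels(example, num_speakers=2):
    """Expand the compact `speakers` segments into a (num_frames, num_speakers)
    0/1 array. Assumes segment offsets are frames relative to the example
    window (assumption, not confirmed by the PR)."""
    num_frames = example["end"] - example["start"]
    labels = np.zeros((num_frames, num_speakers), dtype=np.int64)
    speaker_ids = sorted({seg["speaker_id"] for seg in example["speakers"]})
    for seg in example["speakers"]:
        col = speaker_ids.index(seg["speaker_id"])
        labels[seg["start"] : seg["end"], col] = 1
    return labels
```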
https://api.github.com/repos/huggingface/datasets/issues/2660
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2660/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2660/comments
https://api.github.com/repos/huggingface/datasets/issues/2660/events
https://github.com/huggingface/datasets/pull/2660
946,316,180
MDExOlB1bGxSZXF1ZXN0NjkxNTA4NzE0
2,660
Move checks from _map_single to map
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "@lhoestq This one has been open for a while. Could you please take a look?", "@lhoestq Ready for the final review!", "I forgot to update the signature of `DatasetDict.map`, so did that now." ]
1,626,443,613,000
1,630,937,543,000
1,630,937,543,000
CONTRIBUTOR
null
The goal of this PR is to remove duplicated checks in the `map` logic to execute them only once whenever possible (`fn_kwargs`, `input_columns`, ...). Additionally, this PR improves the consistency (to align it with `input_columns`) of the `remove_columns` check by adding support for a single string value, which is then wrapped into a list.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2660/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2660/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2660", "html_url": "https://github.com/huggingface/datasets/pull/2660", "diff_url": "https://github.com/huggingface/datasets/pull/2660.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2660.patch", "merged_at": 1630937543000 }
true
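The consistency change described in the PR means `remove_columns` now accepts a single string as well as a list. A small self-contained sketch of the two equivalent spellings:

```python
from datasets import Dataset

ds = Dataset.from_dict({"text": [" a ", " b "], "label": [0, 1]})
strip = lambda x: {"clean": x["text"].strip()}

ds1 = ds.map(strip, remove_columns="text")    # single string, accepted after this PR
ds2 = ds.map(strip, remove_columns=["text"])  # list form, as before
```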
https://api.github.com/repos/huggingface/datasets/issues/2659
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2659/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2659/comments
https://api.github.com/repos/huggingface/datasets/issues/2659/events
https://github.com/huggingface/datasets/pull/2659
946,155,407
MDExOlB1bGxSZXF1ZXN0NjkxMzcwNzU3
2,659
Allow dataset config kwargs to be None
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,431,138,000
1,626,439,567,000
1,626,439,567,000
MEMBER
null
Close https://github.com/huggingface/datasets/issues/2658 Dataset config kwargs that were set to None were simply ignored. This was an issue when None has a meaning for certain parameters of certain builders, like the `sep` parameter of the "csv" builder, which allows inferring the separator. cc @SBrandeis
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2659/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2659/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2659", "html_url": "https://github.com/huggingface/datasets/pull/2659", "diff_url": "https://github.com/huggingface/datasets/pull/2659.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2659.patch", "merged_at": 1626439566000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2658
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2658/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2658/comments
https://api.github.com/repos/huggingface/datasets/issues/2658/events
https://github.com/huggingface/datasets/issues/2658
946,139,532
MDU6SXNzdWU5NDYxMzk1MzI=
2,658
Can't pass `sep=None` to load_dataset("csv", ...) to infer the separator via pandas.read_csv
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false } ]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,429,944,000
1,626,439,566,000
1,626,439,566,000
MEMBER
null
When doing `load_dataset("csv", sep=None)`, the `sep` passed to `pd.read_csv` is still the default `sep=","`, which makes it impossible for the csv loader to infer the separator. Related to https://github.com/huggingface/datasets/pull/2656 cc @SBrandeis
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2658/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2658/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
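For illustration, the behavior this issue asks for, once fixed by PR #2659; the file name is hypothetical:

```python
from datasets import load_dataset

# With the fix, sep=None is forwarded to pandas.read_csv, which then
# sniffs the delimiter itself. "data.tsv" is a hypothetical data file.
ds = load_dataset("csv", data_files="data.tsv", sep=None, split="train")
```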
https://api.github.com/repos/huggingface/datasets/issues/2657
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2657/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2657/comments
https://api.github.com/repos/huggingface/datasets/issues/2657/events
https://github.com/huggingface/datasets/issues/2657
945,822,829
MDU6SXNzdWU5NDU4MjI4Mjk=
2,657
`to_json` reporting enhancements
{ "login": "stas00", "id": 10676103, "node_id": "MDQ6VXNlcjEwNjc2MTAz", "avatar_url": "https://avatars.githubusercontent.com/u/10676103?v=4", "gravatar_id": "", "url": "https://api.github.com/users/stas00", "html_url": "https://github.com/stas00", "followers_url": "https://api.github.com/users/stas00/followers", "following_url": "https://api.github.com/users/stas00/following{/other_user}", "gists_url": "https://api.github.com/users/stas00/gists{/gist_id}", "starred_url": "https://api.github.com/users/stas00/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stas00/subscriptions", "organizations_url": "https://api.github.com/users/stas00/orgs", "repos_url": "https://api.github.com/users/stas00/repos", "events_url": "https://api.github.com/users/stas00/events{/privacy}", "received_events_url": "https://api.github.com/users/stas00/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,391,938,000
1,626,392,033,000
null
MEMBER
null
While using `to_json` 2 things came to mind that would have made the experience easier on the user: 1. Could we have a `desc` arg for the tqdm use and a fallback to just `to_json` so that it'd be clear to the user what's happening? Surely, one can just print the description before calling json, but I thought perhaps it'd help to have it self-identify like you did for other progress bars recently. 2. It took me a while to make sense of the reported numbers: ``` 22%|██▏ | 1536/7076 [12:30:57<44:09:42, 28.70s/it] ``` So one iteration here happens to be 10K samples, and the total is 70M records. But the user doesn't know that, so the progress bar is perfect, but the numbers it reports are meaningless until one discovers that 1it=10K samples. And one still has to convert these in one's head - so it's not quick. Not exactly sure what's the best way to approach this, perhaps it can be part of `desc`? Or report M or K, so it'd be built-in if it were to print, e.g.: ``` 22%|██▏ | 15360K/70760K [12:30:57<44:09:42, 28.70s/it] ``` or ``` 22%|██▏ | 15.36M/70.76M [12:30:57<44:09:42, 28.70s/it] ``` (while of course remaining friendly to small datasets) I forget if tqdm lets you add a magnitude identifier to the running count. Thank you!
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2657/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2657/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
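On the last question in the issue: tqdm does support a magnitude identifier via `unit` plus `unit_scale=True`, which renders counts with SI prefixes (K/M/...). A small sketch, with the totals taken from the report above:

```python
from tqdm.auto import tqdm

total = 70_760_000  # total number of records, as in the report above
with tqdm(total=total, desc="to_json", unit="ex", unit_scale=True) as bar:
    for _ in range(0, total, 10_000):
        bar.update(10_000)  # counts render as e.g. "15.4Mex/70.8Mex" instead of raw its
```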
https://api.github.com/repos/huggingface/datasets/issues/2656
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2656/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2656/comments
https://api.github.com/repos/huggingface/datasets/issues/2656/events
https://github.com/huggingface/datasets/pull/2656
945,421,790
MDExOlB1bGxSZXF1ZXN0NjkwNzUzNjA3
2,656
Change `from_csv` default arguments
{ "login": "SBrandeis", "id": 33657802, "node_id": "MDQ6VXNlcjMzNjU3ODAy", "avatar_url": "https://avatars.githubusercontent.com/u/33657802?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SBrandeis", "html_url": "https://github.com/SBrandeis", "followers_url": "https://api.github.com/users/SBrandeis/followers", "following_url": "https://api.github.com/users/SBrandeis/following{/other_user}", "gists_url": "https://api.github.com/users/SBrandeis/gists{/gist_id}", "starred_url": "https://api.github.com/users/SBrandeis/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SBrandeis/subscriptions", "organizations_url": "https://api.github.com/users/SBrandeis/orgs", "repos_url": "https://api.github.com/users/SBrandeis/repos", "events_url": "https://api.github.com/users/SBrandeis/events{/privacy}", "received_events_url": "https://api.github.com/users/SBrandeis/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "This is not the default in pandas right ?\r\nWe try to align our CSV loader with the pandas API.\r\n\r\nMoreover according to their documentation, the python parser is used when sep is None, which might not be the fastest one.\r\n\r\nMaybe users could just specify `sep=None` themselves ?\r\nIn this case we should add some documentation about this" ]
1,626,358,146,000
1,626,431,006,000
1,626,431,006,000
CONTRIBUTOR
null
Passing `sep=None` to pandas's `read_csv` lets pandas guess the CSV file's separator. This PR allows users to use this pandas feature by passing `sep=None` to `Dataset.from_csv`: ```python Dataset.from_csv( ..., sep=None ) ```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2656/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2656/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2656", "html_url": "https://github.com/huggingface/datasets/pull/2656", "diff_url": "https://github.com/huggingface/datasets/pull/2656.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2656.patch", "merged_at": null }
true
https://api.github.com/repos/huggingface/datasets/issues/2655
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2655/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2655/comments
https://api.github.com/repos/huggingface/datasets/issues/2655/events
https://github.com/huggingface/datasets/issues/2655
945,382,723
MDU6SXNzdWU5NDUzODI3MjM=
2,655
Allow the selection of multiple columns at once
{ "login": "Dref360", "id": 8976546, "node_id": "MDQ6VXNlcjg5NzY1NDY=", "avatar_url": "https://avatars.githubusercontent.com/u/8976546?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Dref360", "html_url": "https://github.com/Dref360", "followers_url": "https://api.github.com/users/Dref360/followers", "following_url": "https://api.github.com/users/Dref360/following{/other_user}", "gists_url": "https://api.github.com/users/Dref360/gists{/gist_id}", "starred_url": "https://api.github.com/users/Dref360/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Dref360/subscriptions", "organizations_url": "https://api.github.com/users/Dref360/orgs", "repos_url": "https://api.github.com/users/Dref360/repos", "events_url": "https://api.github.com/users/Dref360/events{/privacy}", "received_events_url": "https://api.github.com/users/Dref360/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Hi! I was looking into this and hope you can clarify a point. Your my_dataset variable would be of type DatasetDict which means the alternative you've described (dict comprehension) is what makes sense. \r\nIs there a reason why you wouldn't want to convert my_dataset to a pandas df if you'd like to use it like one? Please let me know if I'm missing something.", "Hi! Sorry for the delay.\r\n\r\nIn this case, the dataset would be a `datasets.Dataset` and we want to select multiple columns, the `idx` and `label` columns for example.\r\n\r\nMy issue is that my dataset is too big for memory if I load everything into pandas." ]
1,626,355,845,000
1,627,054,857,000
null
CONTRIBUTOR
null
**Is your feature request related to a problem? Please describe.** Similar to pandas, it would be great if we could select multiple columns at once. **Describe the solution you'd like** ```python my_dataset = ... # Has columns ['idx', 'sentence', 'label'] idx, label = my_dataset[['idx', 'label']] ``` **Describe alternatives you've considered** We can do `[dataset[col] for col in ('idx', 'label')]` **Additional context** This is of course very minor.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2655/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2655/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
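Since the follow-up comment notes the dataset is too big to load into pandas, a memory-friendly stand-in for the requested syntax is to drop everything except the wanted columns, which keeps the data in Arrow rather than materializing it:

```python
from datasets import Dataset

ds = Dataset.from_dict({"idx": [0, 1], "sentence": ["a", "b"], "label": [1, 0]})

wanted = ["idx", "label"]
subset = ds.remove_columns([c for c in ds.column_names if c not in wanted])
print(subset.column_names)  # ['idx', 'label']
```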
https://api.github.com/repos/huggingface/datasets/issues/2654
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2654/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2654/comments
https://api.github.com/repos/huggingface/datasets/issues/2654/events
https://github.com/huggingface/datasets/issues/2654
945,167,231
MDU6SXNzdWU5NDUxNjcyMzE=
2,654
Give a user feedback if the dataset he loads is streamable or not
{ "login": "philschmid", "id": 32632186, "node_id": "MDQ6VXNlcjMyNjMyMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/32632186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/philschmid", "html_url": "https://github.com/philschmid", "followers_url": "https://api.github.com/users/philschmid/followers", "following_url": "https://api.github.com/users/philschmid/following{/other_user}", "gists_url": "https://api.github.com/users/philschmid/gists{/gist_id}", "starred_url": "https://api.github.com/users/philschmid/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/philschmid/subscriptions", "organizations_url": "https://api.github.com/users/philschmid/orgs", "repos_url": "https://api.github.com/users/philschmid/repos", "events_url": "https://api.github.com/users/philschmid/events{/privacy}", "received_events_url": "https://api.github.com/users/philschmid/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[ { "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false } ]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "#self-assign", "I understand it already raises a `NotImplementedError` exception, eg:\r\n\r\n```\r\n>>> dataset = load_dataset(\"journalists_questions\", name=\"plain_text\", split=\"train\", streaming=True)\r\n\r\n[...]\r\nNotImplementedError: Extraction protocol for file at https://drive.google.com/uc?export=download&id=1CBrh-9OrSpKmPQBxTK_ji6mq6WTN_U9U is not implemented yet\r\n```\r\n" ]
1,626,340,047,000
1,627,902,201,000
null
MEMBER
null
**Is your feature request related to a problem? Please describe.** I would love to know whether a `dataset` is streamable or not with the current implementation. **Describe the solution you'd like** We could show a warning when a dataset is loaded with `load_dataset('...', streaming=True)` but is not streamable, e.g. if it is an archive. **Describe alternatives you've considered** Add a new metadata tag for "streaming". (A user-side streamability probe is sketched right after this record.)
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2654/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2654/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
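Building on the `NotImplementedError` behavior quoted in the comments of issue #2654 above, the following is a minimal user-side sketch (not part of the `datasets` API) for probing whether a dataset is streamable; the helper name `is_streamable` and the `split="train"` choice are illustrative assumptions.

```python
from datasets import load_dataset

def is_streamable(name, **kwargs):
    """Return True if one example can be pulled from the dataset in streaming mode."""
    try:
        # Unsupported extraction protocols raise NotImplementedError,
        # either at load time or on first iteration.
        ds = load_dataset(name, split="train", streaming=True, **kwargs)
        next(iter(ds))
        return True
    except NotImplementedError:
        return False
```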
https://api.github.com/repos/huggingface/datasets/issues/2653
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2653/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2653/comments
https://api.github.com/repos/huggingface/datasets/issues/2653/events
https://github.com/huggingface/datasets/issues/2653
945,102,321
MDU6SXNzdWU5NDUxMDIzMjE=
2,653
Add SD task for SUPERB
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 2067376369, "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request", "name": "dataset request", "color": "e99695", "default": false, "description": "Requesting to add a new dataset" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
{ "url": "https://api.github.com/repos/huggingface/datasets/milestones/7", "html_url": "https://github.com/huggingface/datasets/milestone/7", "labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/7/labels", "id": 6931350, "node_id": "MDk6TWlsZXN0b25lNjkzMTM1MA==", "number": 7, "title": "1.11", "description": "Next minor release", "creator": { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }, "open_issues": 0, "closed_issues": 2, "state": "closed", "created_at": 1625809740000, "updated_at": 1630560843000, "due_on": 1627628400000, "closed_at": 1630560843000 }
[ "Note that this subset requires us to:\r\n\r\n* generate the LibriMix corpus from LibriSpeech\r\n* prepare the corpus for diarization\r\n\r\nAs suggested by @lhoestq we should perform these steps locally and add the prepared data to this public repo on the Hub: https://huggingface.co/datasets/superb/superb-data\r\n\r\nThen we can use the URLs for the files to load the data in `superb`'s dataset loading script.\r\n\r\nFor consistency, I suggest we name the folders in `superb-data` in the same way as the configs in the dataset loading script - e.g. use `sd` for speech diarization in both places :)", "@lewtun @lhoestq: \r\n\r\nI have already generated the LibriMix corpus and prepared the corpus for diarization. The output is 3 dirs (train, dev, test), each one containing 6 files: reco2dur rttm segments spk2utt utt2spk wav.scp\r\n\r\nNext steps:\r\n- Upload these files to the superb-data repo\r\n- Transcribe the corresponding s3prl processing of these files into our superb loading script\r\n\r\nNote that processing of these files is a bit more intricate than usual datasets: https://github.com/s3prl/s3prl/blob/master/s3prl/downstream/diarization/dataset.py#L233\r\n\r\n" ]
1,626,335,500,000
1,628,096,632,000
1,628,096,632,000
MEMBER
null
Include the SD (Speaker Diarization) task as described in the [SUPERB paper](https://arxiv.org/abs/2105.01051) and `s3prl` [instructions](https://github.com/s3prl/s3prl/tree/master/s3prl/downstream#sd-speaker-diarization). Steps: - [x] Generate the LibriMix corpus - [x] Prepare the corpus for diarization - [x] Upload these files to the superb-data repo - [x] Transcribe the corresponding s3prl processing of these files into our superb loading script - [ ] README: tags + description sections Related to #2619. cc: @lewtun
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2653/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2653/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2652
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2652/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2652/comments
https://api.github.com/repos/huggingface/datasets/issues/2652/events
https://github.com/huggingface/datasets/pull/2652
944,865,924
MDExOlB1bGxSZXF1ZXN0NjkwMjg0MTI4
2,652
Fix logging docstring
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,304,798,000
1,626,608,466,000
1,626,343,051,000
CONTRIBUTOR
null
Remove "no tqdm bars" from the docstring in the logging module to align it with the changes introduced in #2534.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2652/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2652/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2652", "html_url": "https://github.com/huggingface/datasets/pull/2652", "diff_url": "https://github.com/huggingface/datasets/pull/2652.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2652.patch", "merged_at": 1626343051000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2651
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2651/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2651/comments
https://api.github.com/repos/huggingface/datasets/issues/2651/events
https://github.com/huggingface/datasets/issues/2651
944,796,961
MDU6SXNzdWU5NDQ3OTY5NjE=
2,651
Setting log level higher than warning does not suppress progress bar
{ "login": "Isa-rentacs", "id": 1147443, "node_id": "MDQ6VXNlcjExNDc0NDM=", "avatar_url": "https://avatars.githubusercontent.com/u/1147443?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Isa-rentacs", "html_url": "https://github.com/Isa-rentacs", "followers_url": "https://api.github.com/users/Isa-rentacs/followers", "following_url": "https://api.github.com/users/Isa-rentacs/following{/other_user}", "gists_url": "https://api.github.com/users/Isa-rentacs/gists{/gist_id}", "starred_url": "https://api.github.com/users/Isa-rentacs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Isa-rentacs/subscriptions", "organizations_url": "https://api.github.com/users/Isa-rentacs/orgs", "repos_url": "https://api.github.com/users/Isa-rentacs/repos", "events_url": "https://api.github.com/users/Isa-rentacs/events{/privacy}", "received_events_url": "https://api.github.com/users/Isa-rentacs/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Hi,\r\n\r\nyou can suppress progress bars by patching logging as follows:\r\n```python\r\nimport datasets\r\nimport logging\r\ndatasets.logging.get_verbosity = lambda: logging.NOTSET\r\n# map call ...\r\n```", "Thank you, it worked :)", "See https://github.com/huggingface/datasets/issues/2528 for reference", "Note also that you can disable the progress bar with\r\n\r\n```python\r\nfrom datasets.utils import disable_progress_bar\r\ndisable_progress_bar()\r\n```\r\n\r\nSee https://github.com/huggingface/datasets/blob/8814b393984c1c2e1800ba370de2a9f7c8644908/src/datasets/utils/tqdm_utils.py#L84", "Now the library officially recommends `set_progress_bar_enabled(False)`\r\n\r\n```py\r\nfrom datasets.utils import set_progress_bar_enabled\r\n\r\nset_progress_bar_enabled(False)\r\n```\r\n\r\nsource:\r\n\r\nhttps://github.com/huggingface/datasets/blob/1fd47120ace13626c528367787ffa13e1a26e6c0/src/datasets/utils/tqdm_utils.py#L83-L88\r\n\r\n", "From https://github.com/huggingface/datasets/pull/3897, `disable_progress_bar` is the function you should use" ]
1,626,296,811,000
1,647,268,153,000
1,626,320,495,000
NONE
null
## Describe the bug I would like to disable progress bars for the `.map` method (and other methods like `.filter` and `load_dataset` as well). According to #1627, one can suppress them by setting the log level higher than `warning`; however, doing so doesn't suppress them with version 1.9.0. I also tried setting the `DATASETS_VERBOSITY` environment variable to `error` or `critical`, but that didn't work either. (A consolidated snippet of the fixes recommended in the comments follows this record.) ## Steps to reproduce the bug ```python import datasets from datasets.utils.logging import set_verbosity_error set_verbosity_error() def dummy_map(batch): return batch common_voice_train = datasets.load_dataset("common_voice", "de", split="train") common_voice_test = datasets.load_dataset("common_voice", "de", split="test") common_voice_train.map(dummy_map) ``` ## Expected results - The progress bar for the `.map` call won't be shown ## Actual results - The progress bar for `.map` is still shown ## Environment info - `datasets` version: 1.9.0 - Platform: Linux-5.4.0-1045-aws-x86_64-with-Ubuntu-18.04-bionic - Python version: 3.7.5 - PyArrow version: 4.0.1
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2651/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2651/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
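As a consolidated reference for issue #2651 above, this snippet combines the fixes recommended in the comments; per the last comment, `disable_progress_bar` is the function to use on recent versions of `datasets`.

```python
from datasets.utils import disable_progress_bar

# Silences the tqdm bars shown by .map, .filter, load_dataset, etc.
disable_progress_bar()
```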
https://api.github.com/repos/huggingface/datasets/issues/2650
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2650/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2650/comments
https://api.github.com/repos/huggingface/datasets/issues/2650/events
https://github.com/huggingface/datasets/issues/2650
944,672,565
MDU6SXNzdWU5NDQ2NzI1NjU=
2,650
[load_dataset] shard and parallelize the process
{ "login": "stas00", "id": 10676103, "node_id": "MDQ6VXNlcjEwNjc2MTAz", "avatar_url": "https://avatars.githubusercontent.com/u/10676103?v=4", "gravatar_id": "", "url": "https://api.github.com/users/stas00", "html_url": "https://github.com/stas00", "followers_url": "https://api.github.com/users/stas00/followers", "following_url": "https://api.github.com/users/stas00/following{/other_user}", "gists_url": "https://api.github.com/users/stas00/gists{/gist_id}", "starred_url": "https://api.github.com/users/stas00/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stas00/subscriptions", "organizations_url": "https://api.github.com/users/stas00/orgs", "repos_url": "https://api.github.com/users/stas00/repos", "events_url": "https://api.github.com/users/stas00/events{/privacy}", "received_events_url": "https://api.github.com/users/stas00/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "I need the same feature for distributed training" ]
1,626,285,898,000
1,635,177,238,000
null
MEMBER
null
- Some huge datasets (e.g. oscar/en) take forever to build the first time, as the build runs on a single CPU core. - If the build crashes, everything done up to that point gets lost Request: Shard the build over multiple arrow files, which would enable: - a much faster build by parallelizing the build process - if the process crashes, the completed arrow files don't need to be rebuilt Thank you! @lhoestq
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2650/reactions", "total_count": 9, "+1": 5, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 2, "eyes": 2 }
https://api.github.com/repos/huggingface/datasets/issues/2650/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2649
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2649/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2649/comments
https://api.github.com/repos/huggingface/datasets/issues/2649/events
https://github.com/huggingface/datasets/issues/2649
944,651,229
MDU6SXNzdWU5NDQ2NTEyMjk=
2,649
adding progress bar / ETA for `load_dataset`
{ "login": "stas00", "id": 10676103, "node_id": "MDQ6VXNlcjEwNjc2MTAz", "avatar_url": "https://avatars.githubusercontent.com/u/10676103?v=4", "gravatar_id": "", "url": "https://api.github.com/users/stas00", "html_url": "https://github.com/stas00", "followers_url": "https://api.github.com/users/stas00/followers", "following_url": "https://api.github.com/users/stas00/following{/other_user}", "gists_url": "https://api.github.com/users/stas00/gists{/gist_id}", "starred_url": "https://api.github.com/users/stas00/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stas00/subscriptions", "organizations_url": "https://api.github.com/users/stas00/orgs", "repos_url": "https://api.github.com/users/stas00/repos", "events_url": "https://api.github.com/users/stas00/events{/privacy}", "received_events_url": "https://api.github.com/users/stas00/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,284,079,000
1,626,284,280,000
null
MEMBER
null
Please consider: ``` Downloading and preparing dataset oscar/unshuffled_deduplicated_en (download: 462.40 GiB, generated: 1.18 TiB, post-processed: Unknown size, total: 1.63 TiB) to cache/oscar/unshuffled_deduplicated_en/1.0.0/84838bd49d2295f62008383b05620571535451d84545037bb94d6f3501651df2... HF google storage unreachable. Downloading and preparing it from source ``` and no indication whatsoever of whether things are working or when they'll be done. It's important to have an estimated completion time when doing SLURM jobs, since some instances have a cap on run-time. I think for this particular job it sat in total silence for 30 min, and then it started generating: ``` 897850 examples [07:24, 10286.71 examples/s] ``` which is already great! Request: 1. ETA - knowing how many hours to allocate for a SLURM job 2. progress bar - helps to know things are working, aren't stuck, and where we are at. Thank you! @lhoestq
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2649/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2649/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2648
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2648/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2648/comments
https://api.github.com/repos/huggingface/datasets/issues/2648/events
https://github.com/huggingface/datasets/issues/2648
944,484,522
MDU6SXNzdWU5NDQ0ODQ1MjI=
2,648
Add web_split dataset for Paraphase and Rephrase benchmark
{ "login": "bhadreshpsavani", "id": 26653468, "node_id": "MDQ6VXNlcjI2NjUzNDY4", "avatar_url": "https://avatars.githubusercontent.com/u/26653468?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bhadreshpsavani", "html_url": "https://github.com/bhadreshpsavani", "followers_url": "https://api.github.com/users/bhadreshpsavani/followers", "following_url": "https://api.github.com/users/bhadreshpsavani/following{/other_user}", "gists_url": "https://api.github.com/users/bhadreshpsavani/gists{/gist_id}", "starred_url": "https://api.github.com/users/bhadreshpsavani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bhadreshpsavani/subscriptions", "organizations_url": "https://api.github.com/users/bhadreshpsavani/orgs", "repos_url": "https://api.github.com/users/bhadreshpsavani/repos", "events_url": "https://api.github.com/users/bhadreshpsavani/events{/privacy}", "received_events_url": "https://api.github.com/users/bhadreshpsavani/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
{ "login": "bhadreshpsavani", "id": 26653468, "node_id": "MDQ6VXNlcjI2NjUzNDY4", "avatar_url": "https://avatars.githubusercontent.com/u/26653468?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bhadreshpsavani", "html_url": "https://github.com/bhadreshpsavani", "followers_url": "https://api.github.com/users/bhadreshpsavani/followers", "following_url": "https://api.github.com/users/bhadreshpsavani/following{/other_user}", "gists_url": "https://api.github.com/users/bhadreshpsavani/gists{/gist_id}", "starred_url": "https://api.github.com/users/bhadreshpsavani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bhadreshpsavani/subscriptions", "organizations_url": "https://api.github.com/users/bhadreshpsavani/orgs", "repos_url": "https://api.github.com/users/bhadreshpsavani/repos", "events_url": "https://api.github.com/users/bhadreshpsavani/events{/privacy}", "received_events_url": "https://api.github.com/users/bhadreshpsavani/received_events", "type": "User", "site_admin": false }
[ { "login": "bhadreshpsavani", "id": 26653468, "node_id": "MDQ6VXNlcjI2NjUzNDY4", "avatar_url": "https://avatars.githubusercontent.com/u/26653468?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bhadreshpsavani", "html_url": "https://github.com/bhadreshpsavani", "followers_url": "https://api.github.com/users/bhadreshpsavani/followers", "following_url": "https://api.github.com/users/bhadreshpsavani/following{/other_user}", "gists_url": "https://api.github.com/users/bhadreshpsavani/gists{/gist_id}", "starred_url": "https://api.github.com/users/bhadreshpsavani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bhadreshpsavani/subscriptions", "organizations_url": "https://api.github.com/users/bhadreshpsavani/orgs", "repos_url": "https://api.github.com/users/bhadreshpsavani/repos", "events_url": "https://api.github.com/users/bhadreshpsavani/events{/privacy}", "received_events_url": "https://api.github.com/users/bhadreshpsavani/received_events", "type": "User", "site_admin": false } ]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "#take" ]
1,626,272,676,000
1,626,272,772,000
null
CONTRIBUTOR
null
## Describe: For getting simple sentences from a complex sentence, there are datasets and tasks like wiki_split that are available in Hugging Face datasets. web_split is a very similar dataset. Some research papers state that if we train the model on a combination of these two datasets, it will yield better results on both test sets. This dataset is made from the WebNLG data. All the dataset-related details are provided in the repository below. Github link: https://github.com/shashiongithub/Split-and-Rephrase
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2648/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2648/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2647
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2647/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2647/comments
https://api.github.com/repos/huggingface/datasets/issues/2647/events
https://github.com/huggingface/datasets/pull/2647
944,424,941
MDExOlB1bGxSZXF1ZXN0Njg5OTExMzky
2,647
Fix anchor in README
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "https://api.github.com/repos/huggingface/datasets/milestones/6", "html_url": "https://github.com/huggingface/datasets/milestone/6", "labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels", "id": 6836458, "node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==", "number": 6, "title": "1.10", "description": "Next minor release", "creator": { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }, "open_issues": 0, "closed_issues": 29, "state": "closed", "created_at": 1623178113000, "updated_at": 1626881809000, "due_on": 1628146800000, "closed_at": 1626881809000 }
[]
1,626,268,964,000
1,626,608,478,000
1,626,331,847,000
CONTRIBUTOR
null
I forgot to push this fix in #2611, so I'm sending it now.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2647/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2647/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2647", "html_url": "https://github.com/huggingface/datasets/pull/2647", "diff_url": "https://github.com/huggingface/datasets/pull/2647.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2647.patch", "merged_at": 1626331847000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2646
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2646/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2646/comments
https://api.github.com/repos/huggingface/datasets/issues/2646/events
https://github.com/huggingface/datasets/issues/2646
944,379,954
MDU6SXNzdWU5NDQzNzk5NTQ=
2,646
downloading of yahoo_answers_topics dataset failed
{ "login": "vikrant7k", "id": 66781249, "node_id": "MDQ6VXNlcjY2NzgxMjQ5", "avatar_url": "https://avatars.githubusercontent.com/u/66781249?v=4", "gravatar_id": "", "url": "https://api.github.com/users/vikrant7k", "html_url": "https://github.com/vikrant7k", "followers_url": "https://api.github.com/users/vikrant7k/followers", "following_url": "https://api.github.com/users/vikrant7k/following{/other_user}", "gists_url": "https://api.github.com/users/vikrant7k/gists{/gist_id}", "starred_url": "https://api.github.com/users/vikrant7k/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/vikrant7k/subscriptions", "organizations_url": "https://api.github.com/users/vikrant7k/orgs", "repos_url": "https://api.github.com/users/vikrant7k/repos", "events_url": "https://api.github.com/users/vikrant7k/events{/privacy}", "received_events_url": "https://api.github.com/users/vikrant7k/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Hi ! I just tested and it worked fine today for me.\r\n\r\nI think this is because the dataset is stored on Google Drive which has a quota limit for the number of downloads per day, see this similar issue https://github.com/huggingface/datasets/issues/996 \r\n\r\nFeel free to try again today, now that the quota was reset" ]
1,626,265,865,000
1,626,340,516,000
null
NONE
null
## Describe the bug I get an error datasets.utils.info_utils.NonMatchingChecksumError: Checksums didn't match for dataset source files when I try to download the yahoo_answers_topics dataset. ## Steps to reproduce the bug self.dataset = load_dataset( 'yahoo_answers_topics', cache_dir=self.config['yahoo_cache_dir'], split='train[:90%]') ## Expected results The dataset downloads and loads without error. ## Actual results datasets.utils.info_utils.NonMatchingChecksumError: Checksums didn't match for dataset source files
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2646/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2646/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2645
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2645/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2645/comments
https://api.github.com/repos/huggingface/datasets/issues/2645/events
https://github.com/huggingface/datasets/issues/2645
944,374,284
MDU6SXNzdWU5NDQzNzQyODQ=
2,645
load_dataset processing failed with OS error after downloading a dataset
{ "login": "fake-warrior8", "id": 40395156, "node_id": "MDQ6VXNlcjQwMzk1MTU2", "avatar_url": "https://avatars.githubusercontent.com/u/40395156?v=4", "gravatar_id": "", "url": "https://api.github.com/users/fake-warrior8", "html_url": "https://github.com/fake-warrior8", "followers_url": "https://api.github.com/users/fake-warrior8/followers", "following_url": "https://api.github.com/users/fake-warrior8/following{/other_user}", "gists_url": "https://api.github.com/users/fake-warrior8/gists{/gist_id}", "starred_url": "https://api.github.com/users/fake-warrior8/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/fake-warrior8/subscriptions", "organizations_url": "https://api.github.com/users/fake-warrior8/orgs", "repos_url": "https://api.github.com/users/fake-warrior8/repos", "events_url": "https://api.github.com/users/fake-warrior8/events{/privacy}", "received_events_url": "https://api.github.com/users/fake-warrior8/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Hi ! It looks like an issue with pytorch.\r\n\r\nCould you try to run `import torch` and see if it raises an error ?", "> Hi ! It looks like an issue with pytorch.\r\n> \r\n> Could you try to run `import torch` and see if it raises an error ?\r\n\r\nIt works. Thank you!" ]
1,626,265,433,000
1,626,341,642,000
1,626,341,642,000
NONE
null
## Describe the bug After downloading a dataset like opus100, an error is raised: OSError: Cannot find data file. Original error: dlopen: cannot load any more object with static TLS ## Steps to reproduce the bug ```python from datasets import load_dataset this_dataset = load_dataset('opus100', 'af-en') ``` ## Expected results There is no error when running `load_dataset`. ## Actual results Traceback (most recent call last): File "/home/anaconda3/lib/python3.6/site-packages/datasets/builder.py", line 652, in _download_and_prep self._prepare_split(split_generator, **prepare_split_kwargs) File "/home/anaconda3/lib/python3.6/site-packages/datasets/builder.py", line 989, in _prepare_split example = self.info.features.encode_example(record) File "/home/anaconda3/lib/python3.6/site-packages/datasets/features.py", line 952, in encode_example example = cast_to_python_objects(example) File "/home/anaconda3/lib/python3.6/site-packages/datasets/features.py", line 219, in cast_to_python_ob return _cast_to_python_objects(obj)[0] File "/home/anaconda3/lib/python3.6/site-packages/datasets/features.py", line 165, in _cast_to_python_o import torch File "/home/anaconda3/lib/python3.6/site-packages/torch/__init__.py", line 188, in <module> _load_global_deps() File "/home/anaconda3/lib/python3.6/site-packages/torch/__init__.py", line 141, in _load_global_deps ctypes.CDLL(lib_path, mode=ctypes.RTLD_GLOBAL) File "/home/anaconda3/lib/python3.6/ctypes/__init__.py", line 348, in __init__ self._handle = _dlopen(self._name, mode) OSError: dlopen: cannot load any more object with static TLS During handling of the above exception, another exception occurred: Traceback (most recent call last): File "download_hub_opus100.py", line 9, in <module> this_dataset = load_dataset('opus100', language_pair) File "/home/anaconda3/lib/python3.6/site-packages/datasets/load.py", line 748, in load_dataset use_auth_token=use_auth_token, File "/home/anaconda3/lib/python3.6/site-packages/datasets/builder.py", line 575, in download_and_prepa dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs File "/home/anaconda3/lib/python3.6/site-packages/datasets/builder.py", line 658, in _download_and_prep + str(e) OSError: Cannot find data file. Original error: dlopen: cannot load any more object with static TLS ## Environment info - `datasets` version: 1.8.0 - Platform: Linux-3.13.0-32-generic-x86_64-with-debian-jessie-sid - Python version: 3.6.6 - PyArrow version: 3.0.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2645/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2645/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2644
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2644/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2644/comments
https://api.github.com/repos/huggingface/datasets/issues/2644/events
https://github.com/huggingface/datasets/issues/2644
944,254,748
MDU6SXNzdWU5NDQyNTQ3NDg=
2,644
Batched `map` not allowed to return 0 items
{ "login": "pcuenca", "id": 1177582, "node_id": "MDQ6VXNlcjExNzc1ODI=", "avatar_url": "https://avatars.githubusercontent.com/u/1177582?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pcuenca", "html_url": "https://github.com/pcuenca", "followers_url": "https://api.github.com/users/pcuenca/followers", "following_url": "https://api.github.com/users/pcuenca/following{/other_user}", "gists_url": "https://api.github.com/users/pcuenca/gists{/gist_id}", "starred_url": "https://api.github.com/users/pcuenca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pcuenca/subscriptions", "organizations_url": "https://api.github.com/users/pcuenca/orgs", "repos_url": "https://api.github.com/users/pcuenca/repos", "events_url": "https://api.github.com/users/pcuenca/events{/privacy}", "received_events_url": "https://api.github.com/users/pcuenca/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Hi ! Thanks for reporting. Indeed it looks like type inference makes it fail. We should probably just ignore this step until a non-empty batch is passed.", "Sounds good! Do you want me to propose a PR? I'm quite busy right now, but if it's not too urgent I could take a look next week.", "Sure if you're interested feel free to open a PR :)\r\n\r\nYou can also ping me anytime if you have questions or if I can help !", "Sorry to ping you, @lhoestq, did you have a chance to take a look at the proposed PR? Thank you!", "Yes and it's all good, thank you :)\r\n\r\nFeel free to close this issue if it's good for you", "Everything's good, thanks!" ]
1,626,256,699,000
1,627,311,315,000
1,627,311,315,000
CONTRIBUTOR
null
## Describe the bug I'm trying to use `map` to filter a large dataset by selecting rows that match an expensive condition (files referenced by one of the columns need to exist in the filesystem, so we have to `stat` them). According to [the documentation](https://huggingface.co/docs/datasets/processing.html#augmenting-the-dataset), `a batch mapped function can take as input a batch of size N and return a batch of size M where M can be greater or less than N and can even be zero`. However, when the returned batch has a size of zero (no item in the batch fulfilled the condition), we get an `index out of bounds` error. I think that `arrow_writer.py` is [trying to infer the returned types using the first element returned](https://github.com/huggingface/datasets/blob/master/src/datasets/arrow_writer.py#L100), but no elements were returned in this case. For this error to happen, I'm returning a dictionary that contains empty lists for the keys I want to keep, see below. If I return an empty dictionary instead (no keys), then a different error eventually occurs. ## Steps to reproduce the bug ```python def select_rows(examples): # `key` is a column name that exists in the original dataset # The following line simulates no matches found, so we return an empty batch result = {'key': []} return result filtered_dataset = dataset.map( select_rows, remove_columns = dataset.column_names, batched = True, num_proc = 1, desc = "Selecting rows with images that exist" ) ``` The code above immediately triggers the exception. If we use the following instead: ```python def select_rows(examples): # `key` is a column name that exists in the original dataset result = {'key': []} # or defaultdict or whatever # code to check for condition and append elements to result # some_items_found will be set to True if there were any matching elements in the batch return result if some_items_found else {} ``` Then it _seems_ to work, but it eventually fails with some sort of schema error. I believe it may happen when an empty batch is followed by a non-empty one, but haven't set up a test to verify it. In my opinion, returning a dictionary with empty lists and valid column names should be accepted as a valid result with zero items. (An alternative using `Dataset.filter` is sketched after this record.) ## Expected results The dataset would be filtered and only the matching fields would be returned. ## Actual results An exception is encountered, as described. Using a workaround makes it fail further along the line. ## Environment info - `datasets` version: 1.9.1.dev0 - Platform: Linux-5.4.0-53-generic-x86_64-with-glibc2.17 - Python version: 3.8.10 - PyArrow version: 4.0.1
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2644/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2644/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
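As a possible sidestep for issue #2644 above (not the reporter's approach), `Dataset.filter` with `batched=True` takes a predicate returning one boolean per row, so an all-`False` batch is a valid result and no empty batch is ever written; the column name `path` is a hypothetical stand-in for the column that references the files.

```python
import os

# A sketch of the same file-existence filtering done with filter
# instead of a batched map that returns zero items.
filtered_dataset = dataset.filter(
    lambda batch: [os.path.exists(p) for p in batch["path"]],
    batched=True,
    num_proc=1,
)
```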
https://api.github.com/repos/huggingface/datasets/issues/2643
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2643/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2643/comments
https://api.github.com/repos/huggingface/datasets/issues/2643/events
https://github.com/huggingface/datasets/issues/2643
944,220,273
MDU6SXNzdWU5NDQyMjAyNzM=
2,643
Enum used in map functions will raise a RecursionError with dill.
{ "login": "jorgeecardona", "id": 100702, "node_id": "MDQ6VXNlcjEwMDcwMg==", "avatar_url": "https://avatars.githubusercontent.com/u/100702?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jorgeecardona", "html_url": "https://github.com/jorgeecardona", "followers_url": "https://api.github.com/users/jorgeecardona/followers", "following_url": "https://api.github.com/users/jorgeecardona/following{/other_user}", "gists_url": "https://api.github.com/users/jorgeecardona/gists{/gist_id}", "starred_url": "https://api.github.com/users/jorgeecardona/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jorgeecardona/subscriptions", "organizations_url": "https://api.github.com/users/jorgeecardona/orgs", "repos_url": "https://api.github.com/users/jorgeecardona/repos", "events_url": "https://api.github.com/users/jorgeecardona/events{/privacy}", "received_events_url": "https://api.github.com/users/jorgeecardona/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "I'm running into this as well. (Thank you so much for reporting @jorgeecardona — was staring at this massive stack trace and unsure what exactly was wrong!)", "Hi ! Thanks for reporting :)\r\n\r\nUntil this is fixed on `dill`'s side, we could implement a custom saving in our Pickler indefined in utils.py_utils.py\r\nThere is already a suggestion in this message about how to do it:\r\nhttps://github.com/uqfoundation/dill/issues/250#issuecomment-852566284\r\n\r\nLet me know if such a workaround could help, and feel free to open a PR if you want to contribute !", "I have the same bug.\r\nthe code is as follows:\r\n![image](https://user-images.githubusercontent.com/84262181/139785849-620dd4ac-86ce-4212-8163-942bbca305aa.png)\r\nthe error is: \r\n![image](https://user-images.githubusercontent.com/84262181/139785899-88a9bd75-c60b-45a5-b819-830c7c096f3d.png)\r\n\r\nLook for the solution for this bug.", "Hi ! I think your RecursionError comes from a different issue @BitcoinNLPer , could you open a separate issue please ?\r\n\r\nAlso which dataset are you using ? I tried loading `CodedotAI/code_clippy` but I get a different error\r\n```python\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/Users/quentinlhoest/Desktop/hf/datasets/src/datasets/load.py\", line 1615, in load_dataset\r\n **config_kwargs,\r\n File \"/Users/quentinlhoest/Desktop/hf/datasets/src/datasets/load.py\", line 1446, in load_dataset_builder\r\n builder_cls = import_main_class(dataset_module.module_path)\r\n File \"/Users/quentinlhoest/Desktop/hf/datasets/src/datasets/load.py\", line 101, in import_main_class\r\n module = importlib.import_module(module_path)\r\n File \"/Users/quentinlhoest/.virtualenvs/hf-datasets/lib/python3.7/importlib/__init__.py\", line 127, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 1006, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 983, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 967, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 677, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 728, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"/Users/quentinlhoest/.cache/huggingface/modules/datasets_modules/datasets/CodedotAI___code_clippy/d332f69d036e8c80f47bc9a96d676c3fa30cb50af7bb81e2d4d12e80b83efc4d/code_clippy.py\", line 66, in <module>\r\n url_elements = results.find_all(\"a\")\r\nAttributeError: 'NoneType' object has no attribute 'find_all'\r\n```" ]
1,626,254,168,000
1,635,846,671,000
null
NONE
null
## Describe the bug Enums used in functions passed to `map` will fail at pickling with a maximum recursion exception, as described here: https://github.com/uqfoundation/dill/issues/250#issuecomment-852566284 In my particular case, I use an enum to define an argument with fixed options, using the `TrainingArguments` dataclass as base class and the `HfArgumentParser`. In the same file I use a `ds.map` that tries to pickle the content of the module, including the definition of the enum, which runs into the dill bug described above. (A possible user-side workaround is sketched after this record.) ## Steps to reproduce the bug ```python from datasets import load_dataset from enum import Enum class A(Enum): a = 'a' def main(): a = A.a def f(x): return {} if a == a.a else x ds = load_dataset('cnn_dailymail', '3.0.0')['test'] ds = ds.map(f, num_proc=15) if __name__ == "__main__": main() ``` ## Expected results The known problem with dill could be prevented as explained in the link above (workaround). Since `HfArgumentParser` nicely uses the enum class for choices, it makes sense to also deal with this bug under the hood. ## Actual results ```python File "/home/xxxx/miniconda3/lib/python3.8/site-packages/dill/_dill.py", line 1373, in save_type pickler.save_reduce(_create_type, (type(obj), obj.__name__, File "/home/xxxx/miniconda3/lib/python3.8/pickle.py", line 690, in save_reduce save(args) File "/home/xxxx/miniconda3/lib/python3.8/pickle.py", line 558, in save f(self, obj) # Call unbound method with explicit self File "/home/xxxx/miniconda3/lib/python3.8/pickle.py", line 899, in save_tuple save(element) File "/home/xxxx/miniconda3/lib/python3.8/pickle.py", line 534, in save self.framer.commit_frame() File "/home/xxxx/miniconda3/lib/python3.8/pickle.py", line 220, in commit_frame if f.tell() >= self._FRAME_SIZE_TARGET or force: RecursionError: maximum recursion depth exceeded while calling a Python object ``` ## Environment info - `datasets` version: 1.8.0 - Platform: Linux-5.9.0-4-amd64-x86_64-with-glibc2.10 - Python version: 3.8.5 - PyArrow version: 3.0.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2643/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2643/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2642
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2642/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2642/comments
https://api.github.com/repos/huggingface/datasets/issues/2642/events
https://github.com/huggingface/datasets/issues/2642
944,175,697
MDU6SXNzdWU5NDQxNzU2OTc=
2,642
Support multi-worker with streaming dataset (IterableDataset).
{ "login": "cccntu", "id": 31893406, "node_id": "MDQ6VXNlcjMxODkzNDA2", "avatar_url": "https://avatars.githubusercontent.com/u/31893406?v=4", "gravatar_id": "", "url": "https://api.github.com/users/cccntu", "html_url": "https://github.com/cccntu", "followers_url": "https://api.github.com/users/cccntu/followers", "following_url": "https://api.github.com/users/cccntu/following{/other_user}", "gists_url": "https://api.github.com/users/cccntu/gists{/gist_id}", "starred_url": "https://api.github.com/users/cccntu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cccntu/subscriptions", "organizations_url": "https://api.github.com/users/cccntu/orgs", "repos_url": "https://api.github.com/users/cccntu/repos", "events_url": "https://api.github.com/users/cccntu/events{/privacy}", "received_events_url": "https://api.github.com/users/cccntu/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Hi ! This is a great idea :)\r\nI think we could have something similar to what we have in `datasets.Dataset.map`, i.e. a `num_proc` parameter that tells how many processes to spawn to parallelize the data processing. \r\n\r\nRegarding AUTOTUNE, this could be a nice feature as well, we could see how to add it in a second step" ]
1,626,250,978,000
1,626,341,854,000
null
CONTRIBUTOR
null
**Is your feature request related to a problem? Please describe.**

The current `.map` does not support multiprocessing for streaming datasets, so the CPU can become a bottleneck if the pre-processing is complex (e.g. T5 span masking).

**Describe the solution you'd like**

Ideally `.map` should support multiple workers like tfds, with `AUTOTUNE`.

**Describe alternatives you've considered**

A simpler solution is to shard the dataset and process it in parallel with a PyTorch DataLoader (see the sketch below). The shards do not need to be of equal size.

* https://pytorch.org/docs/stable/data.html#torch.utils.data.IterableDataset

**Additional context**
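A rough sketch of the DataLoader alternative mentioned above, assuming round-robin sharding by worker id. Note that every worker still iterates the full stream and skips the examples it does not own, so this trades bandwidth for CPU parallelism rather than saving work.

```python
from datasets import load_dataset
from torch.utils.data import DataLoader, IterableDataset, get_worker_info


class ShardedStream(IterableDataset):
    """Round-robin shard of a streaming dataset across DataLoader workers."""

    def __init__(self, name, config, split):
        self.name, self.config, self.split = name, config, split

    def __iter__(self):
        stream = load_dataset(self.name, self.config, split=self.split, streaming=True)
        worker = get_worker_info()
        if worker is None:  # single-process data loading: yield everything
            yield from stream
        else:  # keep every num_workers-th example, offset by this worker's id
            for i, example in enumerate(stream):
                if i % worker.num_workers == worker.id:
                    yield example


# pre-processing then runs inside the worker processes
loader = DataLoader(
    ShardedStream("oscar", "unshuffled_deduplicated_en", "train"), num_workers=4
)
```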
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2642/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2642/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2641
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2641/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2641/comments
https://api.github.com/repos/huggingface/datasets/issues/2641/events
https://github.com/huggingface/datasets/issues/2641
943,838,085
MDU6SXNzdWU5NDM4MzgwODU=
2,641
load_dataset("financial_phrasebank") NonMatchingChecksumError
{ "login": "courtmckay", "id": 13956255, "node_id": "MDQ6VXNlcjEzOTU2MjU1", "avatar_url": "https://avatars.githubusercontent.com/u/13956255?v=4", "gravatar_id": "", "url": "https://api.github.com/users/courtmckay", "html_url": "https://github.com/courtmckay", "followers_url": "https://api.github.com/users/courtmckay/followers", "following_url": "https://api.github.com/users/courtmckay/following{/other_user}", "gists_url": "https://api.github.com/users/courtmckay/gists{/gist_id}", "starred_url": "https://api.github.com/users/courtmckay/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/courtmckay/subscriptions", "organizations_url": "https://api.github.com/users/courtmckay/orgs", "repos_url": "https://api.github.com/users/courtmckay/repos", "events_url": "https://api.github.com/users/courtmckay/events{/privacy}", "received_events_url": "https://api.github.com/users/courtmckay/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Hi! It's probably because this dataset is stored on google drive and it has a per day quota limit. It should work if you retry, I was able to initiate the download.\r\n\r\nSimilar issue [here](https://github.com/huggingface/datasets/issues/2646)", "Hi ! Loading the dataset works on my side as well.\r\nFeel free to try again and let us know if it works for you know", "Thank you! I've been trying periodically for the past month, and no luck yet with this particular dataset. Just tried again and still hitting the checksum error.\r\n\r\nCode:\r\n\r\n`dataset = load_dataset(\"financial_phrasebank\", \"sentences_allagree\") `\r\n\r\nTraceback:\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nNonMatchingChecksumError Traceback (most recent call last)\r\n<ipython-input-2-55cc2144f31e> in <module>\r\n----> 1 dataset = load_dataset(\"financial_phrasebank\", \"sentences_allagree\")\r\n\r\n/opt/conda/lib/python3.7/site-packages/datasets/load.py in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, script_version, use_auth_token, task, streaming, **config_kwargs)\r\n 859 ignore_verifications=ignore_verifications,\r\n 860 try_from_hf_gcs=try_from_hf_gcs,\r\n--> 861 use_auth_token=use_auth_token,\r\n 862 )\r\n 863 \r\n\r\n/opt/conda/lib/python3.7/site-packages/datasets/builder.py in download_and_prepare(self, download_config, download_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, **download_and_prepare_kwargs)\r\n 582 if not downloaded_from_gcs:\r\n 583 self._download_and_prepare(\r\n--> 584 dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs\r\n 585 )\r\n 586 # Sync info\r\n\r\n/opt/conda/lib/python3.7/site-packages/datasets/builder.py in _download_and_prepare(self, dl_manager, verify_infos, **prepare_split_kwargs)\r\n 642 if verify_infos:\r\n 643 verify_checksums(\r\n--> 644 self.info.download_checksums, dl_manager.get_recorded_sizes_checksums(), \"dataset source files\"\r\n 645 )\r\n 646 \r\n\r\n/opt/conda/lib/python3.7/site-packages/datasets/utils/info_utils.py in verify_checksums(expected_checksums, recorded_checksums, verification_name)\r\n 38 if len(bad_urls) > 0:\r\n 39 error_msg = \"Checksums didn't match\" + for_verification_name + \":\\n\"\r\n---> 40 raise NonMatchingChecksumError(error_msg + str(bad_urls))\r\n 41 logger.info(\"All the checksums matched successfully\" + for_verification_name)\r\n 42 \r\n\r\nNonMatchingChecksumError: Checksums didn't match for dataset source files:\r\n['https://www.researchgate.net/profile/Pekka_Malo/publication/251231364_FinancialPhraseBank-v10/data/0c96051eee4fb1d56e000000/FinancialPhraseBank-v10.zip']\r\n```" ]
1,626,211,309,000
1,626,701,170,000
null
NONE
null
## Describe the bug

Attempting to download the financial_phrasebank dataset results in a NonMatchingChecksumError.

## Steps to reproduce the bug

```python
from datasets import load_dataset

dataset = load_dataset("financial_phrasebank", 'sentences_allagree')
```

## Expected results

I expect to see the financial_phrasebank dataset downloaded successfully.

## Actual results

NonMatchingChecksumError: Checksums didn't match for dataset source files: ['https://www.researchgate.net/profile/Pekka_Malo/publication/251231364_FinancialPhraseBank-v10/data/0c96051eee4fb1d56e000000/FinancialPhraseBank-v10.zip']

## Environment info

- `datasets` version: 1.9.0
- Platform: Linux-4.14.232-177.418.amzn2.x86_64-x86_64-with-debian-10.6
- Python version: 3.7.10
- PyArrow version: 4.0.1
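Two escape hatches exist in this version of `datasets` (both parameters are visible in the `load_dataset` signature in the traceback above) when the host serves bytes that differ from the recorded checksum. This is a sketch only: skipping verification will not help if the served file is actually a quota or error page.

```python
from datasets import load_dataset

# retry with a fresh download, in case an earlier one was truncated or stale
dataset = load_dataset(
    "financial_phrasebank", "sentences_allagree", download_mode="force_redownload"
)

# or, as a last resort, skip the checksum check entirely
dataset = load_dataset(
    "financial_phrasebank", "sentences_allagree", ignore_verifications=True
)
```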
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2641/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2641/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2640
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2640/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2640/comments
https://api.github.com/repos/huggingface/datasets/issues/2640/events
https://github.com/huggingface/datasets/pull/2640
943,591,055
MDExOlB1bGxSZXF1ZXN0Njg5MjAxMDkw
2,640
Fix docstrings
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "https://api.github.com/repos/huggingface/datasets/milestones/6", "html_url": "https://github.com/huggingface/datasets/milestone/6", "labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels", "id": 6836458, "node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==", "number": 6, "title": "1.10", "description": "Next minor release", "creator": { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }, "open_issues": 0, "closed_issues": 29, "state": "closed", "created_at": 1623178113000, "updated_at": 1626881809000, "due_on": 1628146800000, "closed_at": 1626881809000 }
[]
1,626,192,554,000
1,626,331,861,000
1,626,329,172,000
MEMBER
null
Fix rendering of some docstrings.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2640/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2640/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2640", "html_url": "https://github.com/huggingface/datasets/pull/2640", "diff_url": "https://github.com/huggingface/datasets/pull/2640.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2640.patch", "merged_at": 1626329172000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2639
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2639/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2639/comments
https://api.github.com/repos/huggingface/datasets/issues/2639/events
https://github.com/huggingface/datasets/pull/2639
943,527,463
MDExOlB1bGxSZXF1ZXN0Njg5MTQ3NDE5
2,639
Refactor patching to specific submodule
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,188,925,000
1,626,195,169,000
1,626,195,169,000
MEMBER
null
Minor reorganization of the code, so that additional patching functions (not related to streaming) can be created. In relation to the initial approach followed in #2631.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2639/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2639/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2639", "html_url": "https://github.com/huggingface/datasets/pull/2639", "diff_url": "https://github.com/huggingface/datasets/pull/2639.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2639.patch", "merged_at": 1626195168000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2638
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2638/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2638/comments
https://api.github.com/repos/huggingface/datasets/issues/2638/events
https://github.com/huggingface/datasets/pull/2638
943,484,913
MDExOlB1bGxSZXF1ZXN0Njg5MTA5NTg1
2,638
Streaming for the Json loader
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "A note is that I think we should add a few indicator of status (as mentioned by @stas00 in #2649), probably at the (1) downloading, (2) extracting and (3) reading steps. In particular when loading many very large files it's interesting to know a bit where we are in the process.", "I tested locally, and the builtin `json` loader is 4x slower than `pyarrow.json`. Thanks for the comment @albertvillanova !\r\n\r\nTherefore I switched back to using `pyarrow.json`, but only on the batch that is read. This way we don't have to deal with its `block_size`, and it only loads in memory one batch at a time." ]
1,626,187,026,000
1,626,451,172,000
1,626,451,171,000
MEMBER
null
It was not using `open` in the builder. Therefore `pyarrow.json.read_json` was downloading the full file before starting to yield rows. Moreover, it appeared that `pyarrow.json.read_json` was not really suited for streaming, as it was downloading too much data and failing if `block_size` was not properly configured (related to #2573). So I switched to using `open`, which is extended to support reading from remote files progressively, and I removed the pyarrow json reader, which was not practical. Instead, I'm using the classical `json.loads` from the standard library.
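For illustration, the core of the pattern looks roughly like this. It is a sketch, not the merged code; per the follow-up comment above, the merged version reads batches of lines and parses each batch with `pyarrow.json` for speed.

```python
import json


def _generate_examples(files):
    # in streaming mode `open` is patched, so even remote files are read
    # progressively instead of being downloaded in full up front
    key = 0
    for file in files:
        with open(file, encoding="utf-8") as f:
            for line in f:  # JSON Lines: one object per line
                yield key, json.loads(line)
                key += 1
```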
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2638/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2638/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2638", "html_url": "https://github.com/huggingface/datasets/pull/2638", "diff_url": "https://github.com/huggingface/datasets/pull/2638.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2638.patch", "merged_at": 1626451171000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2637
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2637/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2637/comments
https://api.github.com/repos/huggingface/datasets/issues/2637/events
https://github.com/huggingface/datasets/issues/2637
943,290,736
MDU6SXNzdWU5NDMyOTA3MzY=
2,637
Add the CIDEr metric?
{ "login": "zuujhyt", "id": 75845952, "node_id": "MDQ6VXNlcjc1ODQ1OTUy", "avatar_url": "https://avatars.githubusercontent.com/u/75845952?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zuujhyt", "html_url": "https://github.com/zuujhyt", "followers_url": "https://api.github.com/users/zuujhyt/followers", "following_url": "https://api.github.com/users/zuujhyt/following{/other_user}", "gists_url": "https://api.github.com/users/zuujhyt/gists{/gist_id}", "starred_url": "https://api.github.com/users/zuujhyt/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zuujhyt/subscriptions", "organizations_url": "https://api.github.com/users/zuujhyt/orgs", "repos_url": "https://api.github.com/users/zuujhyt/repos", "events_url": "https://api.github.com/users/zuujhyt/events{/privacy}", "received_events_url": "https://api.github.com/users/zuujhyt/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "same" ]
1,626,178,971,000
1,632,729,425,000
null
NONE
null
Hi, I find the API in https://huggingface.co/metrics quite useful. I am playing around with video/image captioning tasks, where CIDEr is a popular metric. Do you plan to add it to the HF `datasets` library? Thanks.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2637/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2637/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2636
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2636/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2636/comments
https://api.github.com/repos/huggingface/datasets/issues/2636/events
https://github.com/huggingface/datasets/pull/2636
943,044,514
MDExOlB1bGxSZXF1ZXN0Njg4NzEyMTY4
2,636
Streaming for the Pandas loader
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,167,901,000
1,626,187,044,000
1,626,187,043,000
MEMBER
null
It was not using `open` in the builder. Therefore `pd.read_pickle` could fail when streaming from a private repo, for example. Indeed, when streaming, `open` is extended to support reading from remote files and handles authentication to the HF Hub.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2636/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2636/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2636", "html_url": "https://github.com/huggingface/datasets/pull/2636", "diff_url": "https://github.com/huggingface/datasets/pull/2636.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2636.patch", "merged_at": 1626187043000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2635
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2635/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2635/comments
https://api.github.com/repos/huggingface/datasets/issues/2635/events
https://github.com/huggingface/datasets/pull/2635
943,030,999
MDExOlB1bGxSZXF1ZXN0Njg4Njk5OTM5
2,635
Streaming for the CSV loader
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,167,338,000
1,626,189,578,000
1,626,189,577,000
MEMBER
null
It was not using `open` in the builder. Therefore `pd.read_csv` was downloading the full file before starting to yield rows. Indeed, when streaming, `open` is extended to support reading from remote files progressively.
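A minimal sketch of the same idea for CSV (a hypothetical helper, not the exact loader code): chunked reading keeps memory bounded and, combined with the patched `open`, only pulls remote bytes as each chunk is consumed.

```python
import pandas as pd


def _generate_tables(files):
    for file_idx, file in enumerate(files):
        with open(file, "rb") as f:
            # `chunksize` makes read_csv return an iterator of DataFrames
            for chunk_idx, chunk in enumerate(pd.read_csv(f, chunksize=10_000)):
                yield (file_idx, chunk_idx), chunk
```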
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2635/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2635/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2635", "html_url": "https://github.com/huggingface/datasets/pull/2635", "diff_url": "https://github.com/huggingface/datasets/pull/2635.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2635.patch", "merged_at": 1626189577000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2634
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2634/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2634/comments
https://api.github.com/repos/huggingface/datasets/issues/2634/events
https://github.com/huggingface/datasets/pull/2634
942,805,621
MDExOlB1bGxSZXF1ZXN0Njg4NDk2Mzc2
2,634
Inject ASR template for lj_speech dataset
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "https://api.github.com/repos/huggingface/datasets/milestones/6", "html_url": "https://github.com/huggingface/datasets/milestone/6", "labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels", "id": 6836458, "node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==", "number": 6, "title": "1.10", "description": "Next minor release", "creator": { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }, "open_issues": 0, "closed_issues": 29, "state": "closed", "created_at": 1623178113000, "updated_at": 1626881809000, "due_on": 1628146800000, "closed_at": 1626881809000 }
[]
1,626,156,294,000
1,626,167,109,000
1,626,167,109,000
MEMBER
null
Related to: #2565, #2633. cc: @lewtun
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2634/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2634/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2634", "html_url": "https://github.com/huggingface/datasets/pull/2634", "diff_url": "https://github.com/huggingface/datasets/pull/2634.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2634.patch", "merged_at": 1626167109000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2633
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2633/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2633/comments
https://api.github.com/repos/huggingface/datasets/issues/2633/events
https://github.com/huggingface/datasets/pull/2633
942,396,414
MDExOlB1bGxSZXF1ZXN0Njg4MTMwOTA5
2,633
Update ASR tags
{ "login": "lewtun", "id": 26859204, "node_id": "MDQ6VXNlcjI2ODU5MjA0", "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lewtun", "html_url": "https://github.com/lewtun", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "organizations_url": "https://api.github.com/users/lewtun/orgs", "repos_url": "https://api.github.com/users/lewtun/repos", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "received_events_url": "https://api.github.com/users/lewtun/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "https://api.github.com/repos/huggingface/datasets/milestones/6", "html_url": "https://github.com/huggingface/datasets/milestone/6", "labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels", "id": 6836458, "node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==", "number": 6, "title": "1.10", "description": "Next minor release", "creator": { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }, "open_issues": 0, "closed_issues": 29, "state": "closed", "created_at": 1623178113000, "updated_at": 1626881809000, "due_on": 1628146800000, "closed_at": 1626881809000 }
[]
1,626,119,911,000
1,626,155,126,000
1,626,155,113,000
MEMBER
null
This PR updates the ASR tags of the 5 datasets added in #2565, following the change of task categories in #2620.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2633/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2633/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2633", "html_url": "https://github.com/huggingface/datasets/pull/2633", "diff_url": "https://github.com/huggingface/datasets/pull/2633.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2633.patch", "merged_at": 1626155113000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2632
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2632/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2632/comments
https://api.github.com/repos/huggingface/datasets/issues/2632/events
https://github.com/huggingface/datasets/pull/2632
942,293,727
MDExOlB1bGxSZXF1ZXN0Njg4MDQyMjcw
2,632
add image-classification task template
{ "login": "nateraw", "id": 32437151, "node_id": "MDQ6VXNlcjMyNDM3MTUx", "avatar_url": "https://avatars.githubusercontent.com/u/32437151?v=4", "gravatar_id": "", "url": "https://api.github.com/users/nateraw", "html_url": "https://github.com/nateraw", "followers_url": "https://api.github.com/users/nateraw/followers", "following_url": "https://api.github.com/users/nateraw/following{/other_user}", "gists_url": "https://api.github.com/users/nateraw/gists{/gist_id}", "starred_url": "https://api.github.com/users/nateraw/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nateraw/subscriptions", "organizations_url": "https://api.github.com/users/nateraw/orgs", "repos_url": "https://api.github.com/users/nateraw/repos", "events_url": "https://api.github.com/users/nateraw/events{/privacy}", "received_events_url": "https://api.github.com/users/nateraw/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Awesome!", "Thanks for adding a new task template - great work @nateraw 🚀 !" ]
1,626,111,663,000
1,626,191,068,000
1,626,190,096,000
CONTRIBUTOR
null
Snippet below is the tl;dr, but you can try it out directly here: [![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/gist/nateraw/005c025d41f0e48ae3d4ee61c0f20b70/image-classification-task-template-demo.ipynb)

```python
from datasets import load_dataset

ds = load_dataset('nateraw/image-folder', data_files='PetImages/')
# DatasetDict({
#     train: Dataset({
#         features: ['file', 'labels'],
#         num_rows: 23410
#     })
# })

ds = ds.prepare_for_task('image-classification')
# DatasetDict({
#     train: Dataset({
#         features: ['image_file_path', 'labels'],
#         num_rows: 23410
#     })
# })
```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2632/reactions", "total_count": 3, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 3, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2632/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2632", "html_url": "https://github.com/huggingface/datasets/pull/2632", "diff_url": "https://github.com/huggingface/datasets/pull/2632.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2632.patch", "merged_at": 1626190095000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2631
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2631/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2631/comments
https://api.github.com/repos/huggingface/datasets/issues/2631/events
https://github.com/huggingface/datasets/pull/2631
942,242,271
MDExOlB1bGxSZXF1ZXN0Njg3OTk3MzM2
2,631
Delete extracted files when loading dataset
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Sure @stas00, it is still a draft pull request. :)", "Yes, I noticed it after reviewing - my apologies.", "The problem with this approach is that it also deletes the downloaded files (if they need not be extracted). 😟 ", "> The problem with this approach is that it also deletes the downloaded files (if they need not be extracted). worried\r\n\r\nRight! These probably should not be deleted by default, but having an option for those users who are tight on disc space?", "> Right! These probably should not be deleted by default, but having an option for those users who are tight on disc space?\r\n\r\nI propose leaving that for another PR, and leave this one handling only with \"extracted\" files. Is it OK for you? :) ", "Awesome thanks !\r\nI just have one question: what about image/audio datasets for which we store the path to the extracted file on the arrow data ?\r\nIn this case the default should be to keep the extracted files.\r\n\r\nSo for now I would just make `keep_extracted=True` by default until we have a way to separate extracted files that can be deleted and extracted files that are actual resources of the dataset.", "@lhoestq, current implementation only deletes extracted \"files\", not extracted \"directories\", as it uses: `os.remove(path)`. I'm going to add a filter on files, so that this line does not throw an exception when passed a directory.\r\n\r\nFor audio datasets, the audio files are inside the extracted \"directory\", so they are not deleted.", "I'm still more in favor of having `keep_extracted=True` by default:\r\n- When working with a dataset, you call `load_dataset` many times. By default we want to keep objects extracted to not extract them over and over again (it can take a long time). Then once you know what you're doing and you want to optimize disk space, you can do `keep_extracted=False`. Deleting the extracted files by default is a regression that can lead to slow downs for people calling `load_dataset` many times, which is common when experimenting\r\n- This behavior doesn't sound natural as a default behavior. In the rest of the library, things are cached and not removed unless you explicitly say do (`map` caching for example). Moreover the function in the download manager is called `download_and_extract`, not `download_and_extract_and_remove_extracted_files`\r\n\r\nLet me know what you think !", "I think the main issue is that after doing some work users typically move on to other datasets and the amount of disc space used keeps on growing. So your logic is very sound and perhaps what's really needed is a cleansweep function that can go through **all** datasets and clean them up to the desired degree:\r\n\r\n- delete all extracted files\r\n- delete all sources\r\n- delete all caches\r\n- delete all caches that haven't been accessed in 6 months\r\n- delete completely old datasets that haven't been accessed in 6 months\r\n- more?\r\n\r\nSo a user can launch a little application, choose what they want to clean up and voila they have just freed up a huge amount of disc space. Makes me think of Ubuntu Tweak's Janitor app - very useful.\r\n\r\nAt the moment, this process of linting is very daunting and error-prone, especially due to all those dirs/files with hash names.", "@stas00 I've had the same idea. Instead of the full-fledged app, a simpler approach would be to add a new command to the CLI.", "oh, CLI would be perfect. 
I didn't mean to request a GUI one specifically, I was just using it as an example.", "@lhoestq I totally agree with you. I'm addressing that change.\r\n\r\n@stas00, @mariosasko, that could eventually be addressed in another pull request. The objective of this PR is:\r\n- add an option to pass to `load_dataset`, so that extracted files are deleted\r\n- do this deletion file per file, once the file has already been used to generate the cache Arrow file", "I also like the idea of having a CLI tool to help users clean their cache and save disk space, good idea !" ]
1,626,107,973,000
1,626,685,699,000
1,626,685,699,000
MEMBER
null
Close #2481, close #2604, close #2591. cc: @stas00, @thomwolf, @BirgerMoell
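Assuming the option landed as a `delete_extracted` flag on `DownloadConfig` (per the discussion above, extracted files are kept by default and deletion is opt-in), usage would look roughly like this:

```python
from datasets import DownloadConfig, load_dataset

# opt in to deleting each extracted file once it has been used to build
# the Arrow cache; pick a dataset whose examples don't point at the
# extracted files themselves (audio/image datasets need them kept)
ds = load_dataset(
    "wikitext",
    "wikitext-103-raw-v1",
    download_config=DownloadConfig(delete_extracted=True),
)
```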
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2631/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2631/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2631", "html_url": "https://github.com/huggingface/datasets/pull/2631", "diff_url": "https://github.com/huggingface/datasets/pull/2631.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2631.patch", "merged_at": 1626685698000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2630
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2630/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2630/comments
https://api.github.com/repos/huggingface/datasets/issues/2630/events
https://github.com/huggingface/datasets/issues/2630
942,102,956
MDU6SXNzdWU5NDIxMDI5NTY=
2,630
Progress bars are not properly rendered in Jupyter notebook
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "To add my experience when trying to debug this issue:\r\n\r\nSeems like previously the workaround given [here](https://github.com/tqdm/tqdm/issues/485#issuecomment-473338308) worked around this issue. But with the latest version of jupyter/tqdm I still get terminal warnings that IPython tried to send a message from a forked process.", "Hi @mludv, thanks for the hint!!! :) \r\n\r\nWe will definitely take it into account to try to fix this issue... It seems somehow related to `multiprocessing` and `tqdm`..." ]
1,626,098,833,000
1,643,903,733,000
1,643,903,733,000
MEMBER
null
## Describe the bug

The progress bars are not rendered as Jupyter widgets; plain terminal-style progress bars appear instead.

## Steps to reproduce the bug

```python
ds.map(tokenize, num_proc=10)
```

## Expected results

Jupyter widgets displaying the progress bars.

## Actual results

Simple plain progress bars.

cc: Reported by @thomwolf
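While the rendering issue is open, the bars can simply be silenced; the helper name depends on the `datasets` version, with `disable_progress_bar` being the top-level helper in recent releases.

```python
import datasets

datasets.disable_progress_bar()  # recent releases
# older releases exposed a setter instead:
# from datasets.utils import set_progress_bar_enabled
# set_progress_bar_enabled(False)
```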
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2630/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2630/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2629
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2629/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2629/comments
https://api.github.com/repos/huggingface/datasets/issues/2629/events
https://github.com/huggingface/datasets/issues/2629
941,819,205
MDU6SXNzdWU5NDE4MTkyMDU=
2,629
Load datasets from the Hub without requiring a dataset script
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false } ]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "This is so cool, let us know if we can help with anything on the hub side (@Pierrci @elishowk) 🎉 " ]
1,626,079,517,000
1,629,901,088,000
1,629,901,088,000
MEMBER
null
As a user I would like to be able to upload my csv/json/text/parquet/etc. files to a dataset repository on the Hugging Face Hub and load this dataset with `load_dataset` without having to implement a dataset script. Moreover, I would like to be able to specify which file goes into which split using the `data_files` argument. This feature should be compatible with private repositories and dataset streaming. This can be implemented by checking the extension of the files in the dataset repository and then using the appropriate dataset builder that is already packaged in the library (csv/json/text/parquet/etc.).
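A sketch of the requested usage, assuming the feature lands as described (`user/my_csv_dataset` is a hypothetical Hub repository containing raw CSV files and no dataset script):

```python
from datasets import load_dataset

# Hypothetical script-less repo: the loader would infer the CSV builder
# from the file extensions, and data_files maps files to splits.
dataset = load_dataset(
    "user/my_csv_dataset",
    data_files={"train": "train.csv", "test": "test.csv"},
)
```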
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2629/reactions", "total_count": 11, "+1": 0, "-1": 0, "laugh": 0, "hooray": 2, "confused": 0, "heart": 7, "rocket": 2, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2629/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2628
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2628/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2628/comments
https://api.github.com/repos/huggingface/datasets/issues/2628/events
https://github.com/huggingface/datasets/pull/2628
941,676,404
MDExOlB1bGxSZXF1ZXN0Njg3NTE0NzQz
2,628
Use ETag of remote data files
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "https://api.github.com/repos/huggingface/datasets/milestones/6", "html_url": "https://github.com/huggingface/datasets/milestone/6", "labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels", "id": 6836458, "node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==", "number": 6, "title": "1.10", "description": "Next minor release", "creator": { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }, "open_issues": 0, "closed_issues": 29, "state": "closed", "created_at": 1623178113000, "updated_at": 1626881809000, "due_on": 1628146800000, "closed_at": 1626881809000 }
[]
1,626,066,610,000
1,626,098,914,000
1,626,079,207,000
MEMBER
null
Use ETag of remote data files to create config ID. Related to #2616.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2628/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2628/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2628", "html_url": "https://github.com/huggingface/datasets/pull/2628", "diff_url": "https://github.com/huggingface/datasets/pull/2628.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2628.patch", "merged_at": 1626079207000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2627
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2627/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2627/comments
https://api.github.com/repos/huggingface/datasets/issues/2627/events
https://github.com/huggingface/datasets/pull/2627
941,503,349
MDExOlB1bGxSZXF1ZXN0Njg3MzczMDg1
2,627
Minor fix tests with Windows paths
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "https://api.github.com/repos/huggingface/datasets/milestones/6", "html_url": "https://github.com/huggingface/datasets/milestone/6", "labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels", "id": 6836458, "node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==", "number": 6, "title": "1.10", "description": "Next minor release", "creator": { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }, "open_issues": 0, "closed_issues": 29, "state": "closed", "created_at": 1623178113000, "updated_at": 1626881809000, "due_on": 1628146800000, "closed_at": 1626881809000 }
[]
1,626,026,148,000
1,626,098,927,000
1,626,078,890,000
MEMBER
null
Minor fix for tests using Windows paths.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2627/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2627/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2627", "html_url": "https://github.com/huggingface/datasets/pull/2627", "diff_url": "https://github.com/huggingface/datasets/pull/2627.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2627.patch", "merged_at": 1626078890000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2626
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2626/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2626/comments
https://api.github.com/repos/huggingface/datasets/issues/2626/events
https://github.com/huggingface/datasets/pull/2626
941,497,830
MDExOlB1bGxSZXF1ZXN0Njg3MzY4OTMz
2,626
Use correct logger in metrics.py
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "https://api.github.com/repos/huggingface/datasets/milestones/6", "html_url": "https://github.com/huggingface/datasets/milestone/6", "labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels", "id": 6836458, "node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==", "number": 6, "title": "1.10", "description": "Next minor release", "creator": { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }, "open_issues": 0, "closed_issues": 29, "state": "closed", "created_at": 1623178113000, "updated_at": 1626881809000, "due_on": 1628146800000, "closed_at": 1626881809000 }
[]
1,626,024,150,000
1,626,098,934,000
1,626,069,269,000
CONTRIBUTOR
null
Fixes #2624
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2626/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2626/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2626", "html_url": "https://github.com/huggingface/datasets/pull/2626", "diff_url": "https://github.com/huggingface/datasets/pull/2626.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2626.patch", "merged_at": 1626069269000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2625
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2625/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2625/comments
https://api.github.com/repos/huggingface/datasets/issues/2625/events
https://github.com/huggingface/datasets/issues/2625
941,439,922
MDU6SXNzdWU5NDE0Mzk5MjI=
2,625
⚛️😇⚙️🔑
{ "login": "hustlen0mics", "id": 50596661, "node_id": "MDQ6VXNlcjUwNTk2NjYx", "avatar_url": "https://avatars.githubusercontent.com/u/50596661?v=4", "gravatar_id": "", "url": "https://api.github.com/users/hustlen0mics", "html_url": "https://github.com/hustlen0mics", "followers_url": "https://api.github.com/users/hustlen0mics/followers", "following_url": "https://api.github.com/users/hustlen0mics/following{/other_user}", "gists_url": "https://api.github.com/users/hustlen0mics/gists{/gist_id}", "starred_url": "https://api.github.com/users/hustlen0mics/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hustlen0mics/subscriptions", "organizations_url": "https://api.github.com/users/hustlen0mics/orgs", "repos_url": "https://api.github.com/users/hustlen0mics/repos", "events_url": "https://api.github.com/users/hustlen0mics/events{/privacy}", "received_events_url": "https://api.github.com/users/hustlen0mics/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[]
1,626,005,674,000
1,626,069,359,000
1,626,069,359,000
NONE
null
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2625/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2625/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2624
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2624/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2624/comments
https://api.github.com/repos/huggingface/datasets/issues/2624/events
https://github.com/huggingface/datasets/issues/2624
941,318,247
MDU6SXNzdWU5NDEzMTgyNDc=
2,624
can't set verbosity for `metric.py`
{ "login": "thomas-happify", "id": 66082334, "node_id": "MDQ6VXNlcjY2MDgyMzM0", "avatar_url": "https://avatars.githubusercontent.com/u/66082334?v=4", "gravatar_id": "", "url": "https://api.github.com/users/thomas-happify", "html_url": "https://github.com/thomas-happify", "followers_url": "https://api.github.com/users/thomas-happify/followers", "following_url": "https://api.github.com/users/thomas-happify/following{/other_user}", "gists_url": "https://api.github.com/users/thomas-happify/gists{/gist_id}", "starred_url": "https://api.github.com/users/thomas-happify/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thomas-happify/subscriptions", "organizations_url": "https://api.github.com/users/thomas-happify/orgs", "repos_url": "https://api.github.com/users/thomas-happify/repos", "events_url": "https://api.github.com/users/thomas-happify/events{/privacy}", "received_events_url": "https://api.github.com/users/thomas-happify/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Thanks @thomas-happify for reporting and thanks @mariosasko for the fix." ]
1,625,948,625,000
1,626,069,269,000
1,626,069,269,000
NONE
null
## Describe the bug ``` [2021-07-10 20:13:11,528][datasets.utils.filelock][INFO] - Lock 139705371374976 acquired on /root/.cache/huggingface/metrics/seqeval/default/default_experiment-1-0.arrow.lock [2021-07-10 20:13:11,529][datasets.arrow_writer][INFO] - Done writing 32 examples in 6100 bytes /root/.cache/huggingface/metrics/seqeval/default/default_experiment-1-0.arrow. [2021-07-10 20:13:11,531][datasets.arrow_dataset][INFO] - Set __getitem__(key) output type to python objects for no columns (when key is int or slice) and don't output other (un-formatted) columns. [2021-07-10 20:13:11,543][/conda/envs/myenv/lib/python3.8/site-packages/datasets/metric.py][INFO] - Removing /root/.cache/huggingface/metrics/seqeval/default/default_experiment-1-0.arrow ``` As you can see, `datasets` logging comes from different places. `filelock`, `arrow_writer` & `arrow_dataset` come from `datasets.*`, which is expected. However, `metric.py` logging comes from `/conda/envs/myenv/lib/python3.8/site-packages/datasets/`. So when setting `datasets.utils.logging.set_verbosity_error()`, it still logs the last message, which is annoying during evaluation. I had to do ``` logging.getLogger("/conda/envs/myenv/lib/python3.8/site-packages/datasets/metric").setLevel(logging.ERROR) ``` to fully mute these messages. ## Expected results It shouldn't log these messages when setting `datasets.utils.logging.set_verbosity_error()`. ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: tried both 1.8.0 & 1.9.0 - Platform: Ubuntu 18.04.5 LTS - Python version: 3.8.10 - PyArrow version: 3.0.0
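For reference, the path-like logger name suggests the module created its logger from a file path rather than a module name; a minimal sketch of the difference (an assumption about the root cause, consistent with the linked fix in #2626):

```python
import logging

# A path-based name escapes the "datasets.*" hierarchy, so
# datasets.utils.logging.set_verbosity_error() cannot reach it:
bad_logger = logging.getLogger(__file__)   # e.g. ".../site-packages/datasets/metric.py"

# A module-based name stays inside the hierarchy and inherits its level:
good_logger = logging.getLogger(__name__)  # e.g. "datasets.metric"
```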
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2624/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2624/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2623
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2623/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2623/comments
https://api.github.com/repos/huggingface/datasets/issues/2623/events
https://github.com/huggingface/datasets/pull/2623
941,265,342
MDExOlB1bGxSZXF1ZXN0Njg3MTk0MjM3
2,623
[Metrics] added wiki_split metrics
{ "login": "bhadreshpsavani", "id": 26653468, "node_id": "MDQ6VXNlcjI2NjUzNDY4", "avatar_url": "https://avatars.githubusercontent.com/u/26653468?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bhadreshpsavani", "html_url": "https://github.com/bhadreshpsavani", "followers_url": "https://api.github.com/users/bhadreshpsavani/followers", "following_url": "https://api.github.com/users/bhadreshpsavani/following{/other_user}", "gists_url": "https://api.github.com/users/bhadreshpsavani/gists{/gist_id}", "starred_url": "https://api.github.com/users/bhadreshpsavani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bhadreshpsavani/subscriptions", "organizations_url": "https://api.github.com/users/bhadreshpsavani/orgs", "repos_url": "https://api.github.com/users/bhadreshpsavani/repos", "events_url": "https://api.github.com/users/bhadreshpsavani/events{/privacy}", "received_events_url": "https://api.github.com/users/bhadreshpsavani/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "patrickvonplaten", "id": 23423619, "node_id": "MDQ6VXNlcjIzNDIzNjE5", "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "gravatar_id": "", "url": "https://api.github.com/users/patrickvonplaten", "html_url": "https://github.com/patrickvonplaten", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "type": "User", "site_admin": false }
[ { "login": "patrickvonplaten", "id": 23423619, "node_id": "MDQ6VXNlcjIzNDIzNjE5", "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "gravatar_id": "", "url": "https://api.github.com/users/patrickvonplaten", "html_url": "https://github.com/patrickvonplaten", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "type": "User", "site_admin": false } ]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Looks all good to me thanks :)\r\nJust did some minor corrections in the docstring" ]
1,625,928,710,000
1,626,272,893,000
1,626,129,271,000
CONTRIBUTOR
null
Fixes: #2606 This pull request adds combined metrics for the WikiSplit (English sentence splitting) task. Reviewer: @patrickvonplaten
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2623/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2623/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2623", "html_url": "https://github.com/huggingface/datasets/pull/2623", "diff_url": "https://github.com/huggingface/datasets/pull/2623.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2623.patch", "merged_at": 1626129271000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2622
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2622/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2622/comments
https://api.github.com/repos/huggingface/datasets/issues/2622/events
https://github.com/huggingface/datasets/issues/2622
941,127,785
MDU6SXNzdWU5NDExMjc3ODU=
2,622
Integration with AugLy
{ "login": "Darktex", "id": 890615, "node_id": "MDQ6VXNlcjg5MDYxNQ==", "avatar_url": "https://avatars.githubusercontent.com/u/890615?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Darktex", "html_url": "https://github.com/Darktex", "followers_url": "https://api.github.com/users/Darktex/followers", "following_url": "https://api.github.com/users/Darktex/following{/other_user}", "gists_url": "https://api.github.com/users/Darktex/gists{/gist_id}", "starred_url": "https://api.github.com/users/Darktex/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Darktex/subscriptions", "organizations_url": "https://api.github.com/users/Darktex/orgs", "repos_url": "https://api.github.com/users/Darktex/repos", "events_url": "https://api.github.com/users/Darktex/events{/privacy}", "received_events_url": "https://api.github.com/users/Darktex/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Hi,\r\n\r\nyou can define your own custom formatting with `Dataset.set_transform()` and then run the tokenizer with the batches of augmented data as follows:\r\n```python\r\ndset = load_dataset(\"imdb\", split=\"train\") # Let's say we are working with the IMDB dataset\r\ndset.set_transform(lambda ex: {\"text\": augly_text_augmentation(ex[\"text\"])}, columns=\"text\", output_all_columns=True)\r\ndataloader = torch.utils.data.DataLoader(dset, batch_size=32)\r\nfor epoch in range(5):\r\n for batch in dataloader:\r\n tokenizer_output = tokenizer(batch.pop(\"text\"), padding=True, truncation=True, return_tensors=\"pt\")\r\n batch.update(tokenizer_output)\r\n output = model(**batch)\r\n ...\r\n```" ]
1,625,875,389,000
1,626,023,291,000
null
NONE
null
**Is your feature request related to a problem? Please describe.** Facebook recently launched a library, [AugLy](https://github.com/facebookresearch/AugLy), that has a unified API for augmentations for image, video and text. It would be pretty exciting to have it hooked up to HF libraries so that we can make NLP models robust to misspellings, punctuation changes, emojis, etc. Plus, with Transformers supporting more CV use cases, having augmentation support becomes crucial. **Describe the solution you'd like** The biggest difference between augmentations and preprocessing is that preprocessing happens only once, but you are running augmentations once per epoch. AugLy operates on text directly, so this breaks the typical workflow where we would run the tokenizer once, set the format to PyTorch tensors, and be ready for the DataLoader. **Describe alternatives you've considered** One possible way of implementing these is to make a custom Dataset class where `__getitem__(i)` runs the augmentation and the tokenizer every time, though this would slow training down considerably given we wouldn't even run the tokenizer in batches.
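A rough sketch of the custom `Dataset` alternative mentioned above (`augment_fn` stands in for a hypothetical AugLy text transform; per-example tokenization is exactly the overhead the request warns about):

```python
from torch.utils.data import Dataset

class AugmentedTextDataset(Dataset):
    """Runs augmentation and tokenization on every __getitem__ call,
    so each epoch sees freshly augmented text."""

    def __init__(self, texts, tokenizer, augment_fn):
        self.texts = texts
        self.tokenizer = tokenizer
        self.augment_fn = augment_fn  # e.g. an AugLy text transform

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, i):
        augmented = self.augment_fn(self.texts[i])
        # Tokenizing one example at a time: correct, but slow vs. batching.
        return self.tokenizer(augmented, truncation=True, padding="max_length")
```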
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2622/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2622/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false
https://api.github.com/repos/huggingface/datasets/issues/2621
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2621/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2621/comments
https://api.github.com/repos/huggingface/datasets/issues/2621/events
https://github.com/huggingface/datasets/pull/2621
940,916,446
MDExOlB1bGxSZXF1ZXN0Njg2OTE1Mzcw
2,621
Use prefix to allow exceed Windows MAX_PATH
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Does this mean the `FileNotFoundError` that avoids infinite loop can be removed?", "Yes, I think so...", "Or maybe we could leave it in case a relative path exceeds the MAX_PATH limit?", " > Or maybe we could leave it in case a relative path exceeds the MAX_PATH limit?\r\n\r\nWhat about converting relative paths to absolute?", "Nice ! Have you had a chance to test it on a windows machine with the max path limit enabled ? Afaik the CI doesn't have the path limit", "Sure @lhoestq: I've tested on my machine... And this fixes most of the tests... 😅 " ]
1,625,848,793,000
1,626,449,292,000
1,626,449,291,000
MEMBER
null
By using this prefix, you can exceed the Windows MAX_PATH limit. See: https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces Related to #2524, #2220.
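A minimal sketch of the prefixing technique (the helper name is hypothetical; the `\\?\` prefix itself is the one documented at the Microsoft link above):

```python
import os

def extended_path(path: str) -> str:
    """Return an absolute path with the Windows long-path prefix,
    which lets Win32 file APIs accept paths beyond MAX_PATH (260 chars)."""
    path = os.path.abspath(path)
    if os.name == "nt" and not path.startswith("\\\\?\\"):
        path = "\\\\?\\" + path
    return path
```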
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2621/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2621/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2621", "html_url": "https://github.com/huggingface/datasets/pull/2621", "diff_url": "https://github.com/huggingface/datasets/pull/2621.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2621.patch", "merged_at": 1626449291000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2620
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2620/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2620/comments
https://api.github.com/repos/huggingface/datasets/issues/2620/events
https://github.com/huggingface/datasets/pull/2620
940,893,389
MDExOlB1bGxSZXF1ZXN0Njg2ODk3MDky
2,620
Add speech processing tasks
{ "login": "lewtun", "id": 26859204, "node_id": "MDQ6VXNlcjI2ODU5MjA0", "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lewtun", "html_url": "https://github.com/lewtun", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "organizations_url": "https://api.github.com/users/lewtun/orgs", "repos_url": "https://api.github.com/users/lewtun/repos", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "received_events_url": "https://api.github.com/users/lewtun/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Are there any `task_categories:automatic-speech-recognition` dataset for which we should update the tags ?", "> Are there any `task_categories:automatic-speech-recognition` dataset for which we should update the tags ?\r\n\r\nYes there's a few - I'll fix them tomorrow :)" ]
1,625,846,849,000
1,626,114,779,000
1,626,111,122,000
MEMBER
null
This PR replaces the `automatic-speech-recognition` task category with a broader `speech-processing` category. The tasks associated with this category are derived from the [SUPERB benchmark](https://arxiv.org/abs/2105.01051), and ASR is included in this set.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2620/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2620/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2620", "html_url": "https://github.com/huggingface/datasets/pull/2620", "diff_url": "https://github.com/huggingface/datasets/pull/2620.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2620.patch", "merged_at": 1626111122000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2619
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2619/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2619/comments
https://api.github.com/repos/huggingface/datasets/issues/2619/events
https://github.com/huggingface/datasets/pull/2619
940,858,236
MDExOlB1bGxSZXF1ZXN0Njg2ODY3NDA4
2,619
Add ASR task for SUPERB
{ "login": "lewtun", "id": 26859204, "node_id": "MDQ6VXNlcjI2ODU5MjA0", "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lewtun", "html_url": "https://github.com/lewtun", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "organizations_url": "https://api.github.com/users/lewtun/orgs", "repos_url": "https://api.github.com/users/lewtun/repos", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "received_events_url": "https://api.github.com/users/lewtun/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "https://api.github.com/repos/huggingface/datasets/milestones/6", "html_url": "https://github.com/huggingface/datasets/milestone/6", "labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels", "id": 6836458, "node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==", "number": 6, "title": "1.10", "description": "Next minor release", "creator": { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }, "open_issues": 0, "closed_issues": 29, "state": "closed", "created_at": 1623178113000, "updated_at": 1626881809000, "due_on": 1628146800000, "closed_at": 1626881809000 }
[ "Wait until #2620 is merged before pushing the README tags in this PR", "> Thanks!\r\n> \r\n> One question: aren't you adding `task_templates` to the `_info` method (and to the `dataset_infos.json`?\r\n\r\ngreat catch! i've now added the asr task template (along with a mapping from superb task -> template) and updated the `dataset_infos.json` :) ", "> Good!\r\n> \r\n> I have a suggested refactoring... Tell me what you think! :)\r\n\r\nyour approach is much more elegant - i've included your suggestions 🙏 " ]
1,625,843,985,000
1,626,339,358,000
1,626,180,018,000
MEMBER
null
This PR starts building up the SUPERB benchmark by including the ASR task as described in the [SUPERB paper](https://arxiv.org/abs/2105.01051) and `s3prl` [instructions](https://github.com/s3prl/s3prl/tree/v0.2.0/downstream#asr-automatic-speech-recognition). Usage: ```python from datasets import load_dataset asr = load_dataset("superb", "asr") # DatasetDict({ # train: Dataset({ # features: ['file', 'text', 'speaker_id', 'chapter_id', 'id'], # num_rows: 28539 # }) # validation: Dataset({ # features: ['file', 'text', 'speaker_id', 'chapter_id', 'id'], # num_rows: 2703 # }) # test: Dataset({ # features: ['file', 'text', 'speaker_id', 'chapter_id', 'id'], # num_rows: 2620 # }) # }) ``` I've used the GLUE benchmark as a guide for filling out the README. To move fast during the evaluation PoC I propose to merge one task at a time, so we can continue building the training / evaluation framework in parallel. Note: codewise this PR is ready for review - I'll add the missing YAML tags once #2620 is merged :)
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2619/reactions", "total_count": 2, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 2, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2619/timeline
null
false
{ "url": "https://api.github.com/repos/huggingface/datasets/pulls/2619", "html_url": "https://github.com/huggingface/datasets/pull/2619", "diff_url": "https://github.com/huggingface/datasets/pull/2619.diff", "patch_url": "https://github.com/huggingface/datasets/pull/2619.patch", "merged_at": 1626180018000 }
true
https://api.github.com/repos/huggingface/datasets/issues/2618
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2618/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2618/comments
https://api.github.com/repos/huggingface/datasets/issues/2618/events
https://github.com/huggingface/datasets/issues/2618
940,852,640
MDU6SXNzdWU5NDA4NTI2NDA=
2,618
`filelock.py` Error
{ "login": "liyucheng09", "id": 27999909, "node_id": "MDQ6VXNlcjI3OTk5OTA5", "avatar_url": "https://avatars.githubusercontent.com/u/27999909?v=4", "gravatar_id": "", "url": "https://api.github.com/users/liyucheng09", "html_url": "https://github.com/liyucheng09", "followers_url": "https://api.github.com/users/liyucheng09/followers", "following_url": "https://api.github.com/users/liyucheng09/following{/other_user}", "gists_url": "https://api.github.com/users/liyucheng09/gists{/gist_id}", "starred_url": "https://api.github.com/users/liyucheng09/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/liyucheng09/subscriptions", "organizations_url": "https://api.github.com/users/liyucheng09/orgs", "repos_url": "https://api.github.com/users/liyucheng09/repos", "events_url": "https://api.github.com/users/liyucheng09/events{/privacy}", "received_events_url": "https://api.github.com/users/liyucheng09/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
{ "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }
[]
{ "url": "", "html_url": "", "labels_url": "", "id": 0, "node_id": "", "number": 0, "title": "", "description": "", "creator": { "login": "", "id": 0, "node_id": "", "avatar_url": "", "gravatar_id": "", "url": "", "html_url": "", "followers_url": "", "following_url": "", "gists_url": "", "starred_url": "", "subscriptions_url": "", "organizations_url": "", "repos_url": "", "events_url": "", "received_events_url": "", "type": "", "site_admin": false }, "open_issues": 0, "closed_issues": 0, "state": "", "created_at": 0, "updated_at": 0, "due_on": 0, "closed_at": 0 }
[ "Hi @liyucheng09, thanks for reporting.\r\n\r\nApparently this issue has to do with your environment setup. One question: is your data in an NFS share? Some people have reported this error when using `fcntl` to write to an NFS share... If this is the case, then it might be that your NFS just may not be set up to provide file locks. You should ask your system administrator, or try these commands in the terminal:\r\n```shell\r\nsudo systemctl enable rpc-statd\r\nsudo systemctl start rpc-statd\r\n```" ]
1,625,843,569,000
1,626,070,830,000
null
NONE
null
## Describe the bug It seems that `filelock.py` raises an error. ``` >>> ds=load_dataset('xsum') ^CTraceback (most recent call last): File "/user/HS502/yl02706/.conda/envs/lyc/lib/python3.6/site-packages/datasets/utils/filelock.py", line 402, in _acquire fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) OSError: [Errno 37] No locks available ``` According to the error log, it is an OSError, but there is an `except` clause in the `_acquire` function. ``` def _acquire(self): open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: fd = os.open(self._lock_file, open_mode) except (IOError, OSError): pass else: self._lock_file_fd = fd return None ``` I don't know why it got stuck rather than passing directly. I am not quite familiar with file lock operations, so any help is highly appreciated. ## Steps to reproduce the bug ```python ds = load_dataset('xsum') ``` ## Expected results A clear and concise description of the expected results. ## Actual results ``` >>> ds=load_dataset('xsum') ^CTraceback (most recent call last): File "/user/HS502/yl02706/.conda/envs/lyc/lib/python3.6/site-packages/datasets/utils/filelock.py", line 402, in _acquire fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) OSError: [Errno 37] No locks available During handling of the above exception, another exception occurred: Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/user/HS502/yl02706/.conda/envs/lyc/lib/python3.6/site-packages/datasets/load.py", line 818, in load_dataset use_auth_token=use_auth_token, File "/user/HS502/yl02706/.conda/envs/lyc/lib/python3.6/site-packages/datasets/load.py", line 470, in prepare_module with FileLock(lock_path): File "/user/HS502/yl02706/.conda/envs/lyc/lib/python3.6/site-packages/datasets/utils/filelock.py", line 323, in __enter__ self.acquire() File "/user/HS502/yl02706/.conda/envs/lyc/lib/python3.6/site-packages/datasets/utils/filelock.py", line 272, in acquire self._acquire() File "/user/HS502/yl02706/.conda/envs/lyc/lib/python3.6/site-packages/datasets/utils/filelock.py", line 402, in _acquire fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) KeyboardInterrupt ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.9.0 - Platform: Linux-4.15.0-135-generic-x86_64-with-debian-buster-sid - Python version: 3.6.13 - PyArrow version: 4.0.1
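As a quick diagnostic (a sketch, following the NFS hypothesis in the comment above — misconfigured NFS mounts raise `OSError: [Errno 37] No locks available` from `fcntl.flock`):

```python
import fcntl
import os

def supports_flock(directory: str) -> bool:
    """Try to take and release an exclusive flock on a scratch file;
    returns False on filesystems without lock support (e.g. some NFS setups)."""
    probe = os.path.join(directory, ".flock_probe")
    fd = os.open(probe, os.O_WRONLY | os.O_CREAT)
    try:
        fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        fcntl.flock(fd, fcntl.LOCK_UN)
        return True
    except OSError:
        return False
    finally:
        os.close(fd)
        os.remove(probe)
```

For example, `supports_flock(os.path.expanduser("~/.cache/huggingface"))` returning `False` would point to the cache directory's filesystem rather than to `datasets` itself.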
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2618/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2618/timeline
null
null
{ "url": "", "html_url": "", "diff_url": "", "patch_url": "", "merged_at": 0 }
false