debug
bernice-pretrain-data.py +2 -1

bernice-pretrain-data.py CHANGED
@@ -67,6 +67,7 @@ _URLS = {
     "indic": ["indic_tweet_ids.txt.gz"]
 }

+
 # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
 class BernicePretrainData(datasets.GeneratorBasedBuilder):
     """Tweet IDs for the 2.5 billion multilingual tweets used to train Bernice, a Twitter encoder."""
@@ -141,7 +142,7 @@ class BernicePretrainData(datasets.GeneratorBasedBuilder):
         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
         for filepath in filepaths:
-            with
+            with open(filepath, encoding="utf-8") as f:
                 for line_number, instance in enumerate(f):
                     tweet_id, lang, year = instance.strip().split("\t")
                     yield tweet_id, {
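For context, a minimal usage sketch (not part of this commit) of how a local dataset script like this is typically loaded with the Hugging Face `datasets` library. The local script path, the "indic" configuration name (taken from the _URLS key above), and the "train" split name are assumptions, not confirmed by the diff:

from datasets import load_dataset

# Load the local dataset script; recent versions of `datasets` may additionally
# require trust_remote_code=True for script-based datasets.
# The "indic" config and "train" split are assumed, not shown in this commit.
ds = load_dataset("./bernice-pretrain-data.py", "indic", split="train")

# _generate_examples yields (tweet_id, example) pairs; inspect the first record.
print(next(iter(ds)))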