parquet-converter committed
Commit bd43a98 • 1 Parent(s): cdeb4ea

Update parquet files
.gitattributes DELETED
@@ -1,44 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zstandard filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- # Audio files - uncompressed
- *.pcm filter=lfs diff=lfs merge=lfs -text
- *.sam filter=lfs diff=lfs merge=lfs -text
- *.raw filter=lfs diff=lfs merge=lfs -text
- # Audio files - compressed
- *.aac filter=lfs diff=lfs merge=lfs -text
- *.flac filter=lfs diff=lfs merge=lfs -text
- *.mp3 filter=lfs diff=lfs merge=lfs -text
- *.ogg filter=lfs diff=lfs merge=lfs -text
- *.wav filter=lfs diff=lfs merge=lfs -text
- corpus.csv filter=lfs diff=lfs merge=lfs -text
- train.json filter=lfs diff=lfs merge=lfs -text
- valid.json filter=lfs diff=lfs merge=lfs -text
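Every rule above routes matching files through the Git LFS filter, so the repository stores a short text pointer while the actual blob lives in LFS storage. A minimal sketch of the pattern matching, illustrative only and assuming Python's fnmatch as an approximation of gitattributes globs (git's handling of `saved_model/**/*` and slash-free patterns differs in detail):

```python
from fnmatch import fnmatch

# A subset of the patterns from the deleted .gitattributes above.
lfs_patterns = ["*.parquet", "*.arrow", "*.gz", "train.json", "valid.json", "corpus.csv"]

def tracked_by_lfs(path: str) -> bool:
    """True if `path` matches any LFS-tracked pattern (fnmatch approximation)."""
    return any(fnmatch(path, pattern) for pattern in lfs_patterns)

# The new Parquet files still match "*.parquet", so they stay in LFS.
print(tracked_by_lfs("translation_en_to_fa/en-fa-translation-train.parquet"))  # True
print(tracked_by_lfs("README.md"))                                             # False
```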
 
README.md DELETED
@@ -1,20 +0,0 @@
- annotations_creators:
- - no-annotation
- language:
- - en
- - fa
- language_creators:
- - crowdsourced
- license:
- - other
- multilinguality:
- - multilingual
- pretty_name: en-fa-translation
- size_categories:
- - 1M<n<10M
- source_datasets:
- - original
- tags: []
- task_categories:
- - translation
- task_ids: []
 
en-fa-translation.py DELETED
@@ -1,110 +0,0 @@
- import csv
- import json
- import os
-
- import datasets
-
-
-
- # TODO: Add description of the dataset here
- # You can copy an official description
- _DESCRIPTION = """\
- This new dataset is designed to solve this great NLP task and is crafted with a lot of care.
- """
- _URL = "https://huggingface.co/datasets/Kamrani/en-fa-translation/resolve/main/"
- _URLs = {
-     "train": _URL + "train.json",
-     "dev": _URL + "dev.json",
-     "test": _URL + "test.json",
- }
-
- # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
- class PersianTranslateDB(datasets.GeneratorBasedBuilder):
-     """TODO: Short description of my dataset."""
-
-     VERSION = datasets.Version("1.0.0")
-
-     # This is an example of a dataset with multiple configurations.
-     # If you don't want/need to define several sub-sets in your dataset,
-     # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
-
-     # If you need to make complex sub-parts in the datasets with configurable options
-     # You can create your own builder configuration class to store attributes, inheriting from datasets.BuilderConfig
-     # BUILDER_CONFIG_CLASS = MyBuilderConfig
-
-     # You will be able to load one or the other configurations in the following list with
-     # data = datasets.load_dataset('my_dataset', 'first_domain')
-     # data = datasets.load_dataset('my_dataset', 'second_domain')
-     BUILDER_CONFIGS = [
-         datasets.BuilderConfig(name="translation_en_to_fa", version=VERSION, description="en-fa translation database"),
-         # datasets.BuilderConfig(name="second_domain", version=VERSION, description="This part of my dataset covers a second domain"),
-     ]
-
-     DEFAULT_CONFIG_NAME = "translation_en_to_fa"  # It's not mandatory to have a default configuration. Just use one if it makes sense.
-
-     def _info(self):
-         # TODO: This method specifies the datasets.DatasetInfo object which contains information and typings for the dataset
-         features = datasets.Features(
-             {
-                 "source": datasets.Value("string"),
-                 "target": datasets.Value("string"),
-                 # These are the features of your dataset like images, labels ...
-             }
-         )
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types
-             features=features,  # Here we define them above because they are different between the two configurations
-             # If there's a common (input, target) tuple from the features, uncomment the supervised_keys line below and
-             # specify them. They'll be used if as_supervised=True in builder.as_dataset.
-             # supervised_keys=("sentence", "label"),
-         )
-
-     def _split_generators(self, dl_manager):
-         # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
-         # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
-
-         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
-         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
-         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-         # data_dir = dl_manager.download_and_extract(_URL)
-         data_dir = _URL
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": os.path.join(data_dir, "train.json"),
-                     "split": "train",
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": os.path.join(data_dir, "valid.json"),
-                     "split": "valid",
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": os.path.join(data_dir, "test.json"),
-                     "split": "test"
-                 },
-             ),
-         ]
-
-     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-     def _generate_examples(self, filepath, split):
-         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
-         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-         with open(filepath, encoding="utf-8") as f:
-             for key, row in enumerate(f):
-                 data = json.loads(row)
-                 yield key, {
-                     "source": data["source"],
-                     "target": data["target"]
-                 }
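With the loading script deleted, `load_dataset` resolves the repo straight to the Parquet files under `translation_en_to_fa/`. A minimal usage sketch, assuming the repo id from the script's `_URL` and a `datasets` release recent enough to read Parquet repos natively:

```python
from datasets import load_dataset

# Before this commit, this call executed en-fa-translation.py; after it,
# the library reads the converted Parquet shards instead.
ds = load_dataset("Kamrani/en-fa-translation")

print(ds)              # DatasetDict with train / validation / test splits
print(ds["train"][0])  # {'source': '...', 'target': '...'}
```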
 
test.json DELETED
The diff for this file is too large to render.
 
valid.json → translation_en_to_fa/en-fa-translation-test.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b3585dbcc89c291849a9c2a48f15ae224e4353bab7558fe7472d386dc41a0fdc
- size 29570613
+ oid sha256:303fc0d8ad094f129d6c04e1ee2671982bcdf39542f190bcd8febc6c3ba81445
+ size 240358
train.json → translation_en_to_fa/en-fa-translation-train.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:db2d170bdcb8d29a4a273f27cef5e71e940bb5a9a3286db5724b0438ca0bfb34
- size 268916697
+ oid sha256:e186552eec5b35d0a5b2b5f464693cb4b5b71a671cf32375bc912c0e70a248c9
+ size 133845194
translation_en_to_fa/en-fa-translation-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf89c214e74a82ecdab02edd3f8716a484f5619a2bebde9b566fa77ed7b6089c
+ size 11998449
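Each pointer body above follows the Git LFS spec: `key value` lines giving the spec version, the blob's sha256, and its size in bytes. A minimal sketch that parses one, illustrative only (this helper is not part of the repo; requires Python 3.9+ for `removeprefix`):

```python
def parse_lfs_pointer(text: str) -> dict:
    """Split the 'key value' lines of a Git LFS pointer into a dict."""
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:bf89c214e74a82ecdab02edd3f8716a484f5619a2bebde9b566fa77ed7b6089c
size 11998449
"""

info = parse_lfs_pointer(pointer)
assert info["size"] == 11998449  # matches the added validation-split pointer
# A downloaded blob can be verified with:
# hashlib.sha256(data).hexdigest() == info["sha256"]
```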