---
license: apache-2.0
dataset_info:
- config_name: default
features:
- name: inputs
dtype: string
- name: outputs
dtype: string
- name: dataset
dtype: string
- name: template
dtype: string
splits:
- name: train
num_bytes: 6643617601
num_examples: 9361602
- name: validation
num_bytes: 334430146
num_examples: 315700
- name: test
num_bytes: 908058507
num_examples: 1299146
download_size: 3635837290
dataset_size: 7886106254
- config_name: digi_sentiment
features:
- name: inputs
dtype: string
- name: outputs
dtype: string
- name: dataset
dtype: string
- name: template
dtype: string
splits:
- name: train
num_bytes: 17608449
num_examples: 22820
- name: validation
num_bytes: 3882215
num_examples: 4890
- name: test
num_bytes: 3868362
num_examples: 4900
download_size: 8359417
dataset_size: 25359026
- config_name: digimag
features:
- name: inputs
dtype: string
- name: outputs
dtype: string
- name: dataset
dtype: string
- name: template
dtype: string
splits:
- name: train
num_bytes: 238365098
num_examples: 68960
- name: validation
num_bytes: 26780346
num_examples: 7670
- name: test
num_bytes: 28913146
num_examples: 8520
download_size: 133522898
dataset_size: 294058590
- config_name: exappc
features:
- name: inputs
dtype: string
- name: outputs
dtype: string
- name: dataset
dtype: string
- name: template
dtype: string
splits:
- name: train
num_bytes: 155435407
num_examples: 270269
- name: validation
num_bytes: 32953710
num_examples: 57592
- name: test
num_bytes: 33691407
num_examples: 58033
download_size: 82477372
dataset_size: 222080524
- config_name: farstail
features:
- name: inputs
dtype: string
- name: outputs
dtype: string
- name: dataset
dtype: string
- name: template
dtype: string
splits:
- name: train
num_bytes: 55160519
num_examples: 72660
- name: validation
num_bytes: 11531714
num_examples: 15370
- name: test
num_bytes: 11743134
num_examples: 15640
download_size: 29639181
dataset_size: 78435367
- config_name: p3_qa_translated
features:
- name: inputs
dtype: string
- name: outputs
dtype: string
- name: dataset
dtype: string
- name: template
dtype: string
splits:
- name: train
num_bytes: 369861273
num_examples: 646505
- name: validation
num_bytes: 39799885
num_examples: 76565
download_size: 114937546
dataset_size: 409661158
- config_name: pars_absa
features:
- name: inputs
dtype: string
- name: outputs
dtype: string
- name: dataset
dtype: string
- name: template
dtype: string
splits:
- name: train
num_bytes: 42114471
num_examples: 45006
- name: validation
num_bytes: 7177753
num_examples: 7506
- name: test
num_bytes: 6919680
num_examples: 7500
download_size: 20744680
dataset_size: 56211904
- config_name: parsinlu_comp
features:
- name: inputs
dtype: string
- name: outputs
dtype: string
- name: dataset
dtype: string
- name: template
dtype: string
splits:
- name: train
num_bytes: 7262540
num_examples: 6000
- name: validation
num_bytes: 1587072
num_examples: 1250
- name: test
num_bytes: 6613589
num_examples: 5700
download_size: 6217942
dataset_size: 15463201
- config_name: parsinlu_en_fa
features:
- name: inputs
dtype: string
- name: outputs
dtype: string
- name: dataset
dtype: string
- name: template
dtype: string
splits:
- name: train
num_bytes: 954515437
num_examples: 3000000
- name: validation
num_bytes: 4644558
num_examples: 12822
- name: test
num_bytes: 137665422
num_examples: 290154
download_size: 371019560
dataset_size: 1096825417
- config_name: parsinlu_entailment
features:
- name: inputs
dtype: string
- name: outputs
dtype: string
- name: dataset
dtype: string
- name: template
dtype: string
splits:
- name: train
num_bytes: 5633585
num_examples: 8296
- name: validation
num_bytes: 2057795
num_examples: 2970
- name: test
num_bytes: 12628240
num_examples: 18407
download_size: 6613453
dataset_size: 20319620
- config_name: parsinlu_fa_en
features:
- name: inputs
dtype: string
- name: outputs
dtype: string
- name: dataset
dtype: string
- name: template
dtype: string
splits:
- name: train
num_bytes: 951650636
num_examples: 2999995
- name: validation
num_bytes: 9269774
num_examples: 25644
- name: test
num_bytes: 267276146
num_examples: 572928
download_size: 429940819
dataset_size: 1228196556
- config_name: parsinlu_qpp
features:
- name: inputs
dtype: string
- name: outputs
dtype: string
- name: dataset
dtype: string
- name: template
dtype: string
splits:
- name: train
num_bytes: 8575019
num_examples: 17164
- name: validation
num_bytes: 4246069
num_examples: 8418
- name: test
num_bytes: 8529281
num_examples: 18078
download_size: 5262177
dataset_size: 21350369
- config_name: parsinlu_sentiment
features:
- name: inputs
dtype: string
- name: outputs
dtype: string
- name: dataset
dtype: string
- name: template
dtype: string
splits:
- name: train
num_bytes: 73453659
num_examples: 136170
download_size: 19197308
dataset_size: 73453659
- config_name: persian_ner
features:
- name: inputs
dtype: string
- name: outputs
dtype: string
- name: dataset
dtype: string
- name: template
dtype: string
splits:
- name: train
num_bytes: 79721941
num_examples: 143388
- name: test
num_bytes: 39461389
num_examples: 71680
download_size: 42457467
dataset_size: 119183330
- config_name: persian_news
features:
- name: inputs
dtype: string
- name: outputs
dtype: string
- name: dataset
dtype: string
- name: template
dtype: string
splits:
- name: train
num_bytes: 215514328
num_examples: 79884
- name: validation
num_bytes: 23986039
num_examples: 8880
- name: test
num_bytes: 26811003
num_examples: 9864
download_size: 115002364
dataset_size: 266311370
- config_name: persian_qa
features:
- name: inputs
dtype: string
- name: outputs
dtype: string
- name: dataset
dtype: string
- name: template
dtype: string
splits:
- name: train
num_bytes: 173326369
num_examples: 81974
- name: validation
num_bytes: 17626045
num_examples: 8463
download_size: 84699623
dataset_size: 190952414
- config_name: peyma
features:
- name: inputs
dtype: string
- name: outputs
dtype: string
- name: dataset
dtype: string
- name: template
dtype: string
splits:
- name: train
num_bytes: 44356046
num_examples: 80280
- name: validation
num_bytes: 4981884
num_examples: 9250
- name: test
num_bytes: 5799753
num_examples: 10260
download_size: 18549315
dataset_size: 55137683
- config_name: pn_sum
features:
- name: inputs
dtype: string
- name: outputs
dtype: string
- name: dataset
dtype: string
- name: template
dtype: string
splits:
- name: train
num_bytes: 2037812524
num_examples: 902242
- name: validation
num_bytes: 20817174
num_examples: 11184
- name: test
num_bytes: 138618064
num_examples: 61523
download_size: 947261646
dataset_size: 2197247762
- config_name: snapp_sentiment
features:
- name: inputs
dtype: string
- name: outputs
dtype: string
- name: dataset
dtype: string
- name: template
dtype: string
splits:
- name: train
num_bytes: 264483001
num_examples: 573210
- name: validation
num_bytes: 42179746
num_examples: 91707
- name: test
num_bytes: 45729114
num_examples: 99363
download_size: 93188564
dataset_size: 352391861
- config_name: syntran
features:
- name: inputs
dtype: string
- name: outputs
dtype: string
- name: dataset
dtype: string
- name: template
dtype: string
splits:
- name: train
num_bytes: 240609866
num_examples: 481060
download_size: 66613727
dataset_size: 240609866
- config_name: wiki_sum
features:
- name: inputs
dtype: string
- name: outputs
dtype: string
- name: dataset
dtype: string
- name: template
dtype: string
splits:
- name: train
num_bytes: 1075245114
num_examples: 365224
- name: validation
num_bytes: 120120218
num_examples: 40584
- name: test
num_bytes: 133195903
num_examples: 45096
download_size: 629544114
dataset_size: 1328561235
- config_name: xl_wic
features:
- name: inputs
dtype: string
- name: outputs
dtype: string
- name: dataset
dtype: string
- name: template
dtype: string
splits:
- name: train
num_bytes: 2773592
num_examples: 7000
- name: validation
num_bytes: 588034
num_examples: 1500
- name: test
num_bytes: 594874
num_examples: 1500
download_size: 1404098
dataset_size: 3956500
configs:
- config_name: default
data_files:
- split: train
path: data/train-*
- split: validation
path: data/validation-*
- split: test
path: data/test-*
- config_name: digi_sentiment
data_files:
- split: train
path: digi_sentiment/train-*
- split: validation
path: digi_sentiment/validation-*
- split: test
path: digi_sentiment/test-*
- config_name: digimag
data_files:
- split: train
path: digimag/train-*
- split: validation
path: digimag/validation-*
- split: test
path: digimag/test-*
- config_name: exappc
data_files:
- split: train
path: exappc/train-*
- split: validation
path: exappc/validation-*
- split: test
path: exappc/test-*
- config_name: farstail
data_files:
- split: train
path: farstail/train-*
- split: validation
path: farstail/validation-*
- split: test
path: farstail/test-*
- config_name: p3_qa_translated
data_files:
- split: train
path: p3_qa_translated/train-*
- split: validation
path: p3_qa_translated/validation-*
- config_name: pars_absa
data_files:
- split: train
path: pars_absa/train-*
- split: validation
path: pars_absa/validation-*
- split: test
path: pars_absa/test-*
- config_name: parsinlu_comp
data_files:
- split: train
path: parsinlu_comp/train-*
- split: validation
path: parsinlu_comp/validation-*
- split: test
path: parsinlu_comp/test-*
- config_name: parsinlu_en_fa
data_files:
- split: train
path: parsinlu_en_fa/train-*
- split: validation
path: parsinlu_en_fa/validation-*
- split: test
path: parsinlu_en_fa/test-*
- config_name: parsinlu_entailment
data_files:
- split: train
path: parsinlu_entailment/train-*
- split: validation
path: parsinlu_entailment/validation-*
- split: test
path: parsinlu_entailment/test-*
- config_name: parsinlu_fa_en
data_files:
- split: train
path: parsinlu_fa_en/train-*
- split: validation
path: parsinlu_fa_en/validation-*
- split: test
path: parsinlu_fa_en/test-*
- config_name: parsinlu_qpp
data_files:
- split: train
path: parsinlu_qpp/train-*
- split: validation
path: parsinlu_qpp/validation-*
- split: test
path: parsinlu_qpp/test-*
- config_name: parsinlu_sentiment
data_files:
- split: train
path: parsinlu_sentiment/train-*
- config_name: persian_ner
data_files:
- split: train
path: persian_ner/train-*
- split: test
path: persian_ner/test-*
- config_name: persian_news
data_files:
- split: train
path: persian_news/train-*
- split: validation
path: persian_news/validation-*
- split: test
path: persian_news/test-*
- config_name: persian_qa
data_files:
- split: train
path: persian_qa/train-*
- split: validation
path: persian_qa/validation-*
- config_name: peyma
data_files:
- split: train
path: peyma/train-*
- split: validation
path: peyma/validation-*
- split: test
path: peyma/test-*
- config_name: pn_sum
data_files:
- split: train
path: pn_sum/train-*
- split: validation
path: pn_sum/validation-*
- split: test
path: pn_sum/test-*
- config_name: snapp_sentiment
data_files:
- split: train
path: snapp_sentiment/train-*
- split: validation
path: snapp_sentiment/validation-*
- split: test
path: snapp_sentiment/test-*
- config_name: syntran
data_files:
- split: train
path: syntran/train-*
- config_name: wiki_sum
data_files:
- split: train
path: wiki_sum/train-*
- split: validation
path: wiki_sum/validation-*
- split: test
path: wiki_sum/test-*
- config_name: xl_wic
data_files:
- split: train
path: xl_wic/train-*
- split: validation
path: xl_wic/validation-*
- split: test
path: xl_wic/test-*
task_categories:
- text-classification
- question-answering
- translation
- text-generation
language:
- fa
pretty_name: FarsInstruct
---
# Dataset Card for FarsInstruct
<!-- Provide a quick summary of the dataset. -->
FarsInstruct is a Persian (Farsi) instruction dataset aggregating many Persian NLP resources (sentiment analysis, news classification, entailment, NER, question answering, summarization, and English↔Persian translation) into a unified prompted `inputs`/`outputs` format. It is introduced in [FarsInstruct: Empowering Large Language Models for Persian Instruction Understanding](https://arxiv.org/abs/2407.11186).
## Dataset Details
### Dataset Description
<!-- Provide a longer summary of what this dataset is. -->
- **Curated by:** Hojjat Mokhtarabadi, Ziba Zamani, Abbas Maazallahi, and Hossein Manshaei
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Language(s) (NLP):** Persian (fa)
- **License:** apache-2.0
### Dataset Sources [optional]
<!-- Provide the basic links for the dataset. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [FarsInstruct: Empowering Large Language Models for Persian Instruction Understanding (arXiv:2407.11186)](https://arxiv.org/abs/2407.11186)
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the dataset is intended to be used. -->
### Direct Use
<!-- This section describes suitable use cases for the dataset. -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the dataset will not work well for. -->
[More Information Needed]
## Dataset Structure
<!-- This section provides a description of the dataset fields, and additional information about the dataset structure such as criteria used to create the splits, relationships between data points, etc. -->
[More Information Needed]
## Dataset Creation
### Curation Rationale
<!-- Motivation for the creation of this dataset. -->
[More Information Needed]
### Source Data
<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->
#### Data Collection and Processing
<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
[More Information Needed]
#### Who are the source data producers?
<!-- This section describes the people or systems who originally created the data. It should also include self-reported demographic or identity information for the source data creators if this information is available. -->
[More Information Needed]
### Annotations [optional]
<!-- If the dataset contains annotations which are not part of the initial data collection, use this section to describe them. -->
#### Annotation process
<!-- This section describes the annotation process such as annotation tools used in the process, the amount of data annotated, annotation guidelines provided to the annotators, interannotator statistics, annotation validation, etc. -->
[More Information Needed]
#### Who are the annotators?
<!-- This section describes the people or systems who created the annotations. -->
[More Information Needed]
#### Personal and Sensitive Information
<!-- State whether the dataset contains data that might be considered personal, sensitive, or private (e.g., data that reveals addresses, uniquely identifiable names or aliases, racial or ethnic origins, sexual orientations, religious beliefs, political opinions, financial or health data, etc.). If efforts were made to anonymize the data, describe the anonymization process. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users should be made aware of the risks, biases and limitations of the dataset. More information needed for further recommendations.
## Citation
<!-- If there is a paper or blog post introducing the dataset, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
```bibtex
@misc{mokhtarabadi2024farsinstructempoweringlargelanguage,
title={FarsInstruct: Empowering Large Language Models for Persian Instruction Understanding},
author={Hojjat Mokhtarabadi and Ziba Zamani and Abbas Maazallahi and Hossein Manshaei},
year={2024},
eprint={2407.11186},
archivePrefix={arXiv},
primaryClass={cs.CL},
url={https://arxiv.org/abs/2407.11186},
}
```