---
language:
- en
- zh
license: cc-by-nc-sa-4.0
task_categories:
- multiple-choice
pretty_name: LogiQA2.0
data_splits:
- train
- validation
- test
dataset_info:
- config_name: logieval
  features:
  - name: content
    dtype: string
  - name: ideal
    dtype: string
  splits:
  - name: train
    num_bytes: 1247308
    num_examples: 1354
  - name: test
    num_bytes: 1505394
    num_examples: 1572
  download_size: 1258944
  dataset_size: 2752702
- config_name: logiqa2_nli
  features:
  - name: label
    dtype:
      class_label:
        names:
          '0': not entailed
          '1': entailed
  - name: major_premise
    sequence: string
  - name: minor_premise
    dtype: string
  - name: conclusion
    dtype: string
  splits:
  - name: train
    num_bytes: 17728839
    num_examples: 31531
  - name: test
    num_bytes: 2213492
    num_examples: 3942
  - name: validation
    num_bytes: 2208687
    num_examples: 3941
  download_size: 13273725
  dataset_size: 22151018
- config_name: logiqa2_zh
  features:
  - name: answer
    dtype: int32
  - name: text
    dtype: string
  - name: question
    dtype: string
  - name: options
    sequence: string
  splits:
  - name: train
    num_bytes: 8820627
    num_examples: 12751
  - name: test
    num_bytes: 1087414
    num_examples: 1594
  - name: validation
    num_bytes: 1107666
    num_examples: 1593
  download_size: 7563394
  dataset_size: 11015707
configs:
- config_name: logieval
  data_files:
  - split: train
    path: logieval/train-*
  - split: test
    path: logieval/test-*
- config_name: logiqa2_nli
  data_files:
  - split: train
    path: logiqa2_nli/train-*
  - split: test
    path: logiqa2_nli/test-*
  - split: validation
    path: logiqa2_nli/validation-*
- config_name: logiqa2_zh
  data_files:
  - split: train
    path: logiqa2_zh/train-*
  - split: test
    path: logiqa2_zh/test-*
  - split: validation
    path: logiqa2_zh/validation-*
---

# Dataset Card for LogiQA 2.0

## Dataset Description

- **Homepage:** https://github.com/csitfun/LogiQA2.0, https://github.com/csitfun/LogiEval
- **Repository:** https://github.com/csitfun/LogiQA2.0, https://github.com/csitfun/LogiEval
- **Paper:** https://ieeexplore.ieee.org/abstract/document/10174688

### Dataset Summary

LogiQA 2.0 is a dataset for logical reasoning in machine reading comprehension (MRC) and natural language inference (NLI) tasks. This repository provides three configurations: `logiqa2_zh` (Chinese multiple-choice MRC), `logiqa2_nli` (natural language inference), and `logieval` (prompt-formatted examples for evaluating instruction-following models); a loading sketch follows below.
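
The snippet below is a minimal sketch of loading the configurations with the `datasets` library. The repository id `REPO_ID` is a placeholder for wherever this card is hosted; the config names, split names, and feature names are taken from the metadata above.

```python
from datasets import load_dataset

# Placeholder: replace with the Hub id of this dataset repository.
REPO_ID = "<namespace>/LogiQA2.0"

# Multiple-choice MRC config: text, question, options (list of strings), answer (int).
mrc = load_dataset(REPO_ID, "logiqa2_zh")
sample = mrc["train"][0]
print(sample["question"], sample["options"], sample["answer"])

# NLI config: major_premise (list of strings), minor_premise, conclusion,
# and a binary label where 0 = not entailed and 1 = entailed.
nli = load_dataset(REPO_ID, "logiqa2_nli")
print(nli["validation"].features["label"].names)
```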

LogiEval is a benchmark suite for testing the logical reasoning abilities of instruction-prompted large language models; the `logieval` configuration stores each example as a prompt (`content`) paired with a reference answer (`ideal`), as sketched below.
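
As an illustration only (not the official LogiEval harness), the `content` and `ideal` fields can drive a simple exact-match evaluation of any `predict(prompt) -> str` callable; `REPO_ID` is again a placeholder.

```python
from datasets import load_dataset

REPO_ID = "<namespace>/LogiQA2.0"  # placeholder repository id

# The logieval config ships train and test splits of prompt/answer pairs.
logieval = load_dataset(REPO_ID, "logieval")

def exact_match_accuracy(predict, split="test"):
    """Score a `predict(prompt) -> str` callable against the `ideal` answers."""
    hits = 0
    for row in logieval[split]:
        # `content` is the full instruction prompt; `ideal` is the reference answer.
        if predict(row["content"]).strip() == row["ideal"].strip():
            hits += 1
    return hits / logieval[split].num_rows
```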

### Licensing Information

Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.

### Citation Information

@ARTICLE{10174688,
  author={Liu, Hanmeng and Liu, Jian and Cui, Leyang and Teng, Zhiyang and Duan, Nan and Zhou, Ming and Zhang, Yue},
  journal={IEEE/ACM Transactions on Audio, Speech, and Language Processing},
  title={LogiQA 2.0 — An Improved Dataset for Logical Reasoning in Natural Language Understanding},
  year={2023},
  volume={},
  number={},
  pages={1-16},
  doi={10.1109/TASLP.2023.3293046}}

@misc{liu2023evaluating,
  title={Evaluating the Logical Reasoning Ability of ChatGPT and GPT-4},
  author={Hanmeng Liu and Ruoxi Ning and Zhiyang Teng and Jian Liu and Qiji Zhou and Yue Zhang},
  year={2023},
  eprint={2304.03439},
  archivePrefix={arXiv},
  primaryClass={cs.CL}
}