Tasks: Question Answering
Modalities: Text
Formats: parquet
Sub-tasks: multiple-choice-qa
Languages: English
Size: 1M - 10M
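The block below is the per-config `dataset_infos` metadata for MMLU. For each subject config it lists the features (`question`, `choices`, and `answer` as a `ClassLabel` over the names A-D, plus a `subject` string in most configs) and the four splits (`auxiliary_train`, `test`, `validation`, `dev`) with their example counts and sizes. As a quick orientation, here is a minimal sketch of loading one config with the `datasets` library and decoding the answer label; the Hub repository id `cais/mmlu` is an assumption (use whichever copy of the dataset you work with), while the feature and split names come from the metadata itself.

```python
# Minimal sketch: load one MMLU subject config and decode its ClassLabel answer.
# Assumption: the dataset is hosted under the Hub id "cais/mmlu"; swap in the
# repository id of the copy you actually use.
from datasets import load_dataset

ds = load_dataset("cais/mmlu", "abstract_algebra")  # one of the 57 subject configs

# The metadata below lists four splits per config.
print({split: ds[split].num_rows for split in ds})
# e.g. {'auxiliary_train': 99842, 'test': 100, 'validation': 11, 'dev': 5}

example = ds["test"][0]
answer_feature = ds["test"].features["answer"]     # ClassLabel with names A-D
print(example["question"])
print(example["choices"])                          # four answer strings
print(answer_feature.int2str(example["answer"]))   # integer 0-3 -> "A".."D"
```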
| { | |
| "abstract_algebra": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "subject": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "builder_name": "parquet", | |
| "dataset_name": "mmlu", | |
| "config_name": "abstract_algebra", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 161000625, | |
| "num_examples": 99842, | |
| "dataset_name": null | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 21316, | |
| "num_examples": 100, | |
| "dataset_name": null | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 2232, | |
| "num_examples": 11, | |
| "dataset_name": null | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 918, | |
| "num_examples": 5, | |
| "dataset_name": null | |
| } | |
| }, | |
| "download_size": 47179638, | |
| "dataset_size": 161025091, | |
| "size_in_bytes": 208204729 | |
| }, | |
| "anatomy": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "subject": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "builder_name": "parquet", | |
| "dataset_name": "mmlu", | |
| "config_name": "anatomy", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 161000625, | |
| "num_examples": 99842, | |
| "dataset_name": null | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 34594, | |
| "num_examples": 135, | |
| "dataset_name": null | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 3282, | |
| "num_examples": 14, | |
| "dataset_name": null | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1010, | |
| "num_examples": 5, | |
| "dataset_name": null | |
| } | |
| }, | |
| "download_size": 47191229, | |
| "dataset_size": 161039511, | |
| "size_in_bytes": 208230740 | |
| }, | |
| "astronomy": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "subject": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "builder_name": "parquet", | |
| "dataset_name": "mmlu", | |
| "config_name": "astronomy", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 161000625, | |
| "num_examples": 99842, | |
| "dataset_name": null | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 48735, | |
| "num_examples": 152, | |
| "dataset_name": null | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 5223, | |
| "num_examples": 16, | |
| "dataset_name": null | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 2129, | |
| "num_examples": 5, | |
| "dataset_name": null | |
| } | |
| }, | |
| "download_size": 47201551, | |
| "dataset_size": 161056712, | |
| "size_in_bytes": 208258263 | |
| }, | |
| "business_ethics": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "subject": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "builder_name": "parquet", | |
| "dataset_name": "mmlu", | |
| "config_name": "business_ethics", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 161000625, | |
| "num_examples": 99842, | |
| "dataset_name": null | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 35140, | |
| "num_examples": 100, | |
| "dataset_name": null | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 3235, | |
| "num_examples": 11, | |
| "dataset_name": null | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 2273, | |
| "num_examples": 5, | |
| "dataset_name": null | |
| } | |
| }, | |
| "download_size": 47193421, | |
| "dataset_size": 161041273, | |
| "size_in_bytes": 208234694 | |
| }, | |
| "clinical_knowledge": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "subject": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "builder_name": "parquet", | |
| "dataset_name": "mmlu", | |
| "config_name": "clinical_knowledge", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 161000625, | |
| "num_examples": 99842, | |
| "dataset_name": null | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 68572, | |
| "num_examples": 265, | |
| "dataset_name": null | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 7290, | |
| "num_examples": 29, | |
| "dataset_name": null | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1308, | |
| "num_examples": 5, | |
| "dataset_name": null | |
| } | |
| }, | |
| "download_size": 47213955, | |
| "dataset_size": 161077795, | |
| "size_in_bytes": 208291750 | |
| }, | |
| "college_biology": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "subject": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "builder_name": "parquet", | |
| "dataset_name": "mmlu", | |
| "config_name": "college_biology", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 161000625, | |
| "num_examples": 99842, | |
| "dataset_name": null | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 51521, | |
| "num_examples": 144, | |
| "dataset_name": null | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 5111, | |
| "num_examples": 16, | |
| "dataset_name": null | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1615, | |
| "num_examples": 5, | |
| "dataset_name": null | |
| } | |
| }, | |
| "download_size": 47205152, | |
| "dataset_size": 161058872, | |
| "size_in_bytes": 208264024 | |
| }, | |
| "college_chemistry": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "subject": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "builder_name": "parquet", | |
| "dataset_name": "mmlu", | |
| "config_name": "college_chemistry", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 161000625, | |
| "num_examples": 99842, | |
| "dataset_name": null | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 26796, | |
| "num_examples": 100, | |
| "dataset_name": null | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 2484, | |
| "num_examples": 8, | |
| "dataset_name": null | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1424, | |
| "num_examples": 5, | |
| "dataset_name": null | |
| } | |
| }, | |
| "download_size": 47188958, | |
| "dataset_size": 161031329, | |
| "size_in_bytes": 208220287 | |
| }, | |
| "college_computer_science": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "subject": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "builder_name": "parquet", | |
| "dataset_name": "mmlu", | |
| "config_name": "college_computer_science", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 161000625, | |
| "num_examples": 99842, | |
| "dataset_name": null | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 45429, | |
| "num_examples": 100, | |
| "dataset_name": null | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 4959, | |
| "num_examples": 11, | |
| "dataset_name": null | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 2893, | |
| "num_examples": 5, | |
| "dataset_name": null | |
| } | |
| }, | |
| "download_size": 47201966, | |
| "dataset_size": 161053906, | |
| "size_in_bytes": 208255872 | |
| }, | |
| "college_mathematics": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "subject": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "builder_name": "parquet", | |
| "dataset_name": "mmlu", | |
| "config_name": "college_mathematics", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 161000625, | |
| "num_examples": 99842, | |
| "dataset_name": null | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 26999, | |
| "num_examples": 100, | |
| "dataset_name": null | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 2909, | |
| "num_examples": 11, | |
| "dataset_name": null | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1596, | |
| "num_examples": 5, | |
| "dataset_name": null | |
| } | |
| }, | |
| "download_size": 47188597, | |
| "dataset_size": 161032129, | |
| "size_in_bytes": 208220726 | |
| }, | |
| "college_medicine": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "subject": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "builder_name": "parquet", | |
| "dataset_name": "mmlu", | |
| "config_name": "college_medicine", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 161000625, | |
| "num_examples": 99842, | |
| "dataset_name": null | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 85845, | |
| "num_examples": 173, | |
| "dataset_name": null | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 8337, | |
| "num_examples": 22, | |
| "dataset_name": null | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1758, | |
| "num_examples": 5, | |
| "dataset_name": null | |
| } | |
| }, | |
| "download_size": 47218201, | |
| "dataset_size": 161096565, | |
| "size_in_bytes": 208314766 | |
| }, | |
| "college_physics": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "subject": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "builder_name": "parquet", | |
| "dataset_name": "mmlu", | |
| "config_name": "college_physics", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 161000625, | |
| "num_examples": 99842, | |
| "dataset_name": null | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 32107, | |
| "num_examples": 102, | |
| "dataset_name": null | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 3687, | |
| "num_examples": 11, | |
| "dataset_name": null | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1495, | |
| "num_examples": 5, | |
| "dataset_name": null | |
| } | |
| }, | |
| "download_size": 47190901, | |
| "dataset_size": 161037914, | |
| "size_in_bytes": 208228815 | |
| }, | |
| "computer_security": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "subject": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "builder_name": "parquet", | |
| "dataset_name": "mmlu", | |
| "config_name": "computer_security", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 161000625, | |
| "num_examples": 99842, | |
| "dataset_name": null | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 29212, | |
| "num_examples": 100, | |
| "dataset_name": null | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 4768, | |
| "num_examples": 11, | |
| "dataset_name": null | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1194, | |
| "num_examples": 5, | |
| "dataset_name": null | |
| } | |
| }, | |
| "download_size": 47192155, | |
| "dataset_size": 161035799, | |
| "size_in_bytes": 208227954 | |
| }, | |
| "conceptual_physics": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "subject": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "builder_name": "parquet", | |
| "dataset_name": "mmlu", | |
| "config_name": "conceptual_physics", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 161000625, | |
| "num_examples": 99842, | |
| "dataset_name": null | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 45867, | |
| "num_examples": 235, | |
| "dataset_name": null | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 5034, | |
| "num_examples": 26, | |
| "dataset_name": null | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1032, | |
| "num_examples": 5, | |
| "dataset_name": null | |
| } | |
| }, | |
| "download_size": 47197231, | |
| "dataset_size": 161052558, | |
| "size_in_bytes": 208249789 | |
| }, | |
| "econometrics": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "subject": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "builder_name": "parquet", | |
| "dataset_name": "mmlu", | |
| "config_name": "econometrics", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 161000625, | |
| "num_examples": 99842, | |
| "dataset_name": null | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 48359, | |
| "num_examples": 114, | |
| "dataset_name": null | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 5147, | |
| "num_examples": 12, | |
| "dataset_name": null | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1712, | |
| "num_examples": 5, | |
| "dataset_name": null | |
| } | |
| }, | |
| "download_size": 47197846, | |
| "dataset_size": 161055843, | |
| "size_in_bytes": 208253689 | |
| }, | |
| "electrical_engineering": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "subject": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "builder_name": "parquet", | |
| "dataset_name": "mmlu", | |
| "config_name": "electrical_engineering", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 161000625, | |
| "num_examples": 99842, | |
| "dataset_name": null | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 28900, | |
| "num_examples": 145, | |
| "dataset_name": null | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 3307, | |
| "num_examples": 16, | |
| "dataset_name": null | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1090, | |
| "num_examples": 5, | |
| "dataset_name": null | |
| } | |
| }, | |
| "download_size": 47189021, | |
| "dataset_size": 161033922, | |
| "size_in_bytes": 208222943 | |
| }, | |
| "elementary_mathematics": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "subject": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "builder_name": "parquet", | |
| "dataset_name": "mmlu", | |
| "config_name": "elementary_mathematics", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 161000625, | |
| "num_examples": 99842, | |
| "dataset_name": null | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 79924, | |
| "num_examples": 378, | |
| "dataset_name": null | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 10042, | |
| "num_examples": 41, | |
| "dataset_name": null | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1558, | |
| "num_examples": 5, | |
| "dataset_name": null | |
| } | |
| }, | |
| "download_size": 47216972, | |
| "dataset_size": 161092149, | |
| "size_in_bytes": 208309121 | |
| }, | |
| "formal_logic": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "subject": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "builder_name": "parquet", | |
| "dataset_name": "mmlu", | |
| "config_name": "formal_logic", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 161000625, | |
| "num_examples": 99842, | |
| "dataset_name": null | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 51789, | |
| "num_examples": 126, | |
| "dataset_name": null | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 6464, | |
| "num_examples": 14, | |
| "dataset_name": null | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1825, | |
| "num_examples": 5, | |
| "dataset_name": null | |
| } | |
| }, | |
| "download_size": 47194349, | |
| "dataset_size": 161060703, | |
| "size_in_bytes": 208255052 | |
| }, | |
| "global_facts": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "subject": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "builder_name": "parquet", | |
| "dataset_name": "mmlu", | |
| "config_name": "global_facts", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 161000625, | |
| "num_examples": 99842, | |
| "dataset_name": null | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 19991, | |
| "num_examples": 100, | |
| "dataset_name": null | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 2013, | |
| "num_examples": 10, | |
| "dataset_name": null | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1297, | |
| "num_examples": 5, | |
| "dataset_name": null | |
| } | |
| }, | |
| "download_size": 47181634, | |
| "dataset_size": 161023926, | |
| "size_in_bytes": 208205560 | |
| }, | |
| "high_school_biology": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "subject": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "builder_name": "parquet", | |
| "dataset_name": "mmlu", | |
| "config_name": "high_school_biology", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 161000625, | |
| "num_examples": 99842, | |
| "dataset_name": null | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 116850, | |
| "num_examples": 310, | |
| "dataset_name": null | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 11746, | |
| "num_examples": 32, | |
| "dataset_name": null | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1776, | |
| "num_examples": 5, | |
| "dataset_name": null | |
| } | |
| }, | |
| "download_size": 47239946, | |
| "dataset_size": 161130997, | |
| "size_in_bytes": 208370943 | |
| }, | |
| "high_school_chemistry": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "subject": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "builder_name": "parquet", | |
| "dataset_name": "mmlu", | |
| "config_name": "high_school_chemistry", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 161000625, | |
| "num_examples": 99842, | |
| "dataset_name": null | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 63527, | |
| "num_examples": 203, | |
| "dataset_name": null | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 7630, | |
| "num_examples": 22, | |
| "dataset_name": null | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1333, | |
| "num_examples": 5, | |
| "dataset_name": null | |
| } | |
| }, | |
| "download_size": 47207769, | |
| "dataset_size": 161073115, | |
| "size_in_bytes": 208280884 | |
| }, | |
| "high_school_computer_science": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "subject": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "builder_name": "parquet", | |
| "dataset_name": "mmlu", | |
| "config_name": "high_school_computer_science", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 161000625, | |
| "num_examples": 99842, | |
| "dataset_name": null | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 47664, | |
| "num_examples": 100, | |
| "dataset_name": null | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 3619, | |
| "num_examples": 9, | |
| "dataset_name": null | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 3066, | |
| "num_examples": 5, | |
| "dataset_name": null | |
| } | |
| }, | |
| "download_size": 47200669, | |
| "dataset_size": 161054974, | |
| "size_in_bytes": 208255643 | |
| }, | |
| "high_school_european_history": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "subject": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "builder_name": "parquet", | |
| "dataset_name": "mmlu", | |
| "config_name": "high_school_european_history", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 161000625, | |
| "num_examples": 99842, | |
| "dataset_name": null | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 275568, | |
| "num_examples": 165, | |
| "dataset_name": null | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 30196, | |
| "num_examples": 18, | |
| "dataset_name": null | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 11712, | |
| "num_examples": 5, | |
| "dataset_name": null | |
| } | |
| }, | |
| "download_size": 47349494, | |
| "dataset_size": 161318101, | |
| "size_in_bytes": 208667595 | |
| }, | |
| "high_school_geography": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "subject": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "builder_name": "parquet", | |
| "dataset_name": "mmlu", | |
| "config_name": "high_school_geography", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 161000625, | |
| "num_examples": 99842, | |
| "dataset_name": null | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 46972, | |
| "num_examples": 198, | |
| "dataset_name": null | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 4870, | |
| "num_examples": 22, | |
| "dataset_name": null | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1516, | |
| "num_examples": 5, | |
| "dataset_name": null | |
| } | |
| }, | |
| "download_size": 47200648, | |
| "dataset_size": 161053983, | |
| "size_in_bytes": 208254631 | |
| }, | |
| "high_school_government_and_politics": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "subject": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "builder_name": "parquet", | |
| "dataset_name": "mmlu", | |
| "config_name": "high_school_government_and_politics", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 161000625, | |
| "num_examples": 99842, | |
| "dataset_name": null | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 73589, | |
| "num_examples": 193, | |
| "dataset_name": null | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 7870, | |
| "num_examples": 21, | |
| "dataset_name": null | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1962, | |
| "num_examples": 5, | |
| "dataset_name": null | |
| } | |
| }, | |
| "download_size": 47214961, | |
| "dataset_size": 161084046, | |
| "size_in_bytes": 208299007 | |
| }, | |
| "high_school_macroeconomics": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "high_school_macroeconomics", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 117675, | |
| "num_examples": 390, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 13008, | |
| "num_examples": 43, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1316, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160733256, | |
| "size_in_bytes": 326918216 | |
| }, | |
| "high_school_mathematics": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "high_school_mathematics", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 54842, | |
| "num_examples": 270, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 5753, | |
| "num_examples": 29, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1285, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160663137, | |
| "size_in_bytes": 326848097 | |
| }, | |
| "high_school_microeconomics": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "high_school_microeconomics", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 75691, | |
| "num_examples": 238, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 7541, | |
| "num_examples": 26, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1286, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160685775, | |
| "size_in_bytes": 326870735 | |
| }, | |
| "high_school_physics": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "high_school_physics", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 59526, | |
| "num_examples": 151, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 6759, | |
| "num_examples": 17, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1477, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160669019, | |
| "size_in_bytes": 326853979 | |
| }, | |
| "high_school_psychology": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "high_school_psychology", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 159395, | |
| "num_examples": 545, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 17257, | |
| "num_examples": 60, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1893, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160779802, | |
| "size_in_bytes": 326964762 | |
| }, | |
| "high_school_statistics": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "high_school_statistics", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 110690, | |
| "num_examples": 216, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 9985, | |
| "num_examples": 23, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 2516, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160724448, | |
| "size_in_bytes": 326909408 | |
| }, | |
| "high_school_us_history": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "high_school_us_history", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 296722, | |
| "num_examples": 204, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 31694, | |
| "num_examples": 22, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 8852, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160938525, | |
| "size_in_bytes": 327123485 | |
| }, | |
| "high_school_world_history": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "high_school_world_history", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 378605, | |
| "num_examples": 237, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 45489, | |
| "num_examples": 26, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 4870, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 161030221, | |
| "size_in_bytes": 327215181 | |
| }, | |
| "human_aging": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "human_aging", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 46086, | |
| "num_examples": 223, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 4695, | |
| "num_examples": 23, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 996, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160653034, | |
| "size_in_bytes": 326837994 | |
| }, | |
| "human_sexuality": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "human_sexuality", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 32098, | |
| "num_examples": 131, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 2409, | |
| "num_examples": 12, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1065, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160636829, | |
| "size_in_bytes": 326821789 | |
| }, | |
| "international_law": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "international_law", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 53519, | |
| "num_examples": 121, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 6461, | |
| "num_examples": 13, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 2406, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160663643, | |
| "size_in_bytes": 326848603 | |
| }, | |
| "jurisprudence": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "jurisprudence", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 33974, | |
| "num_examples": 108, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 3717, | |
| "num_examples": 11, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1291, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160640239, | |
| "size_in_bytes": 326825199 | |
| }, | |
| "logical_fallacies": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "logical_fallacies", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 50105, | |
| "num_examples": 163, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 5091, | |
| "num_examples": 18, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1561, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160658014, | |
| "size_in_bytes": 326842974 | |
| }, | |
| "machine_learning": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "machine_learning", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 33868, | |
| "num_examples": 112, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 3220, | |
| "num_examples": 11, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 2311, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160640656, | |
| "size_in_bytes": 326825616 | |
| }, | |
| "management": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "management", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 19990, | |
| "num_examples": 103, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 1808, | |
| "num_examples": 11, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 886, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160623941, | |
| "size_in_bytes": 326808901 | |
| }, | |
| "marketing": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "marketing", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 63013, | |
| "num_examples": 234, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 7382, | |
| "num_examples": 25, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1469, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160673121, | |
| "size_in_bytes": 326858081 | |
| }, | |
| "medical_genetics": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "medical_genetics", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 20852, | |
| "num_examples": 100, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 2993, | |
| "num_examples": 11, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1077, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160626179, | |
| "size_in_bytes": 326811139 | |
| }, | |
| "miscellaneous": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "miscellaneous", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 147692, | |
| "num_examples": 783, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 14318, | |
| "num_examples": 86, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 687, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160763954, | |
| "size_in_bytes": 326948914 | |
| }, | |
| "moral_disputes": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "moral_disputes", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 107806, | |
| "num_examples": 346, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 12408, | |
| "num_examples": 38, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1743, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160723214, | |
| "size_in_bytes": 326908174 | |
| }, | |
| "moral_scenarios": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "moral_scenarios", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 374014, | |
| "num_examples": 895, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 42326, | |
| "num_examples": 100, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 2046, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 161019643, | |
| "size_in_bytes": 327204603 | |
| }, | |
| "nutrition": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "nutrition", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 92398, | |
| "num_examples": 306, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 8424, | |
| "num_examples": 33, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 2073, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160704152, | |
| "size_in_bytes": 326889112 | |
| }, | |
| "philosophy": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "philosophy", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 80061, | |
| "num_examples": 311, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 9172, | |
| "num_examples": 34, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 976, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160691466, | |
| "size_in_bytes": 326876426 | |
| }, | |
| "prehistory": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "prehistory", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 89582, | |
| "num_examples": 324, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 10273, | |
| "num_examples": 35, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1866, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160702978, | |
| "size_in_bytes": 326887938 | |
| }, | |
| "professional_accounting": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "professional_accounting", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 124538, | |
| "num_examples": 282, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 14360, | |
| "num_examples": 31, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 2136, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160742291, | |
| "size_in_bytes": 326927251 | |
| }, | |
| "professional_law": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "professional_law", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 1891750, | |
| "num_examples": 1534, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 203507, | |
| "num_examples": 170, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 6598, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 162703112, | |
| "size_in_bytes": 328888072 | |
| }, | |
| "professional_medicine": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "professional_medicine", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 217549, | |
| "num_examples": 272, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 23835, | |
| "num_examples": 31, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 3795, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160846436, | |
| "size_in_bytes": 327031396 | |
| }, | |
| "professional_psychology": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "professional_psychology", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 225887, | |
| "num_examples": 612, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 29089, | |
| "num_examples": 69, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 2255, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160858488, | |
| "size_in_bytes": 327043448 | |
| }, | |
| "public_relations": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "public_relations", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 28748, | |
| "num_examples": 110, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 4554, | |
| "num_examples": 12, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1484, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160636043, | |
| "size_in_bytes": 326821003 | |
| }, | |
| "security_studies": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "security_studies", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 204832, | |
| "num_examples": 245, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 22625, | |
| "num_examples": 27, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 5323, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160834037, | |
| "size_in_bytes": 327018997 | |
| }, | |
| "sociology": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "sociology", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 66231, | |
| "num_examples": 201, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 7172, | |
| "num_examples": 22, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1601, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160676261, | |
| "size_in_bytes": 326861221 | |
| }, | |
| "us_foreign_policy": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "us_foreign_policy", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 28431, | |
| "num_examples": 100, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 3252, | |
| "num_examples": 11, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1599, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160634539, | |
| "size_in_bytes": 326819499 | |
| }, | |
| "virology": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "virology", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 38747, | |
| "num_examples": 166, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 5451, | |
| "num_examples": 18, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 1084, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160646539, | |
| "size_in_bytes": 326831499 | |
| }, | |
| "world_religions": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "id": null, | |
| "_type": "Value" | |
| }, | |
| "length": -1, | |
| "id": null, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "num_classes": 4, | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "id": null, | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "post_processed": null, | |
| "supervised_keys": null, | |
| "task_templates": null, | |
| "builder_name": "mmlu", | |
| "config_name": "world_religions", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "description": null, | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 160601257, | |
| "num_examples": 99842, | |
| "dataset_name": "mmlu" | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 25262, | |
| "num_examples": 171, | |
| "dataset_name": "mmlu" | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 2753, | |
| "num_examples": 19, | |
| "dataset_name": "mmlu" | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 658, | |
| "num_examples": 5, | |
| "dataset_name": "mmlu" | |
| } | |
| }, | |
| "download_checksums": { | |
| "data.tar": { | |
| "num_bytes": 166184960, | |
| "checksum": "bec563ba4bac1d6aaf04141cd7d1605d7a5ca833e38f994051e818489592989b" | |
| } | |
| }, | |
| "download_size": 166184960, | |
| "post_processing_size": null, | |
| "dataset_size": 160629930, | |
| "size_in_bytes": 326814890 | |
| }, | |
| "all": { | |
| "description": "This is a massive multitask test consisting of multiple-choice questions from various branches of knowledge, covering 57 tasks including elementary mathematics, US history, computer science, law, and more.\n", | |
| "citation": "@article{hendryckstest2021,\n title={Measuring Massive Multitask Language Understanding},\n author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},\n journal={Proceedings of the International Conference on Learning Representations (ICLR)},\n year={2021}\n }\n", | |
| "homepage": "https://github.com/hendrycks/test", | |
| "license": "", | |
| "features": { | |
| "question": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "subject": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "choices": { | |
| "feature": { | |
| "dtype": "string", | |
| "_type": "Value" | |
| }, | |
| "_type": "Sequence" | |
| }, | |
| "answer": { | |
| "names": [ | |
| "A", | |
| "B", | |
| "C", | |
| "D" | |
| ], | |
| "_type": "ClassLabel" | |
| } | |
| }, | |
| "builder_name": "parquet", | |
| "dataset_name": "mmlu", | |
| "config_name": "all", | |
| "version": { | |
| "version_str": "1.0.0", | |
| "major": 1, | |
| "minor": 0, | |
| "patch": 0 | |
| }, | |
| "splits": { | |
| "auxiliary_train": { | |
| "name": "auxiliary_train", | |
| "num_bytes": 161000625, | |
| "num_examples": 99842, | |
| "dataset_name": null | |
| }, | |
| "test": { | |
| "name": "test", | |
| "num_bytes": 6967453, | |
| "num_examples": 14042, | |
| "dataset_name": null | |
| }, | |
| "validation": { | |
| "name": "validation", | |
| "num_bytes": 763484, | |
| "num_examples": 1531, | |
| "dataset_name": null | |
| }, | |
| "dev": { | |
| "name": "dev", | |
| "num_bytes": 125353, | |
| "num_examples": 285, | |
| "dataset_name": null | |
| } | |
| }, | |
| "download_size": 51132212, | |
| "dataset_size": 168856915, | |
| "size_in_bytes": 219989127 | |
| } | |
| } | |
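
For reference, here is a minimal sketch of how one of the configs described in this metadata could be loaded with the Hugging Face `datasets` library. The hub repository id used below (`cais/mmlu`) is an assumption, as it is not stated in this file; substitute the id of the repository this metadata belongs to.

```python
# Minimal sketch, assuming the dataset is hosted on the Hugging Face Hub
# under the id "cais/mmlu" (an assumption; not stated in this file).
from datasets import load_dataset

# Load a single subject config, e.g. "world_religions"
# (171 test questions per the split metadata above).
ds = load_dataset("cais/mmlu", "world_religions")

# Splits mirror the metadata: auxiliary_train, test, validation, dev.
print(ds)

# "answer" is a ClassLabel with names ["A", "B", "C", "D"]; the stored
# value is an integer index, which int2str() maps back to its letter.
example = ds["test"][0]
letter = ds["test"].features["answer"].int2str(example["answer"])
print(example["question"], example["choices"], letter)
```

The `"all"` config aggregates every subject (14,042 test questions) and adds a `subject` column, whereas the per-subject configs above expose only `question`, `choices`, and `answer`.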