Tasks: Text Generation
Modalities: Text
Sub-tasks: language-modeling
Languages: Persian
Size: 100K - 1M
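
The dataset can be loaded directly with the datasets library. A minimal usage sketch, assuming the dataset is published on the Hugging Face Hub under the repo id "RohanAiLab/persian_blog" (the repo id is an assumption of this example, not confirmed by this card):

from datasets import load_dataset

# Repo id assumed for illustration; point this at wherever the dataset actually lives.
dataset = load_dataset("RohanAiLab/persian_blog", split="train")
print(dataset[0]["text"])  # first blog post

The loading script below downloads blogs.zip, extracts blogs.csv, and yields one example per row with a single "text" field: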
import csv
import os
import sys

import datasets

# Raise the csv module's default field size limit so very long blog posts parse without errors.
csv.field_size_limit(sys.maxsize)
_DESCRIPTION = """persian_blog is a dataset consisting of 400K blog posts from various websites, covering a variety of tones.
The dataset can be used for different NLG tasks; as a show-case, it was used to train reformer-persian."""
_PROJECT_URL = """"""
_CITATION = """@misc{persian_blog,
  author = {Saied Alimoradi},
  year = {2021},
  howpublished = {https://saied71.github.io/RohanAiLab/},
}
"""
_URL = "blogs.zip"
class persian_blog(datasets.GeneratorBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                }
            ),
            homepage=_PROJECT_URL,
            citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        dl_dir = dl_manager.download_and_extract(_URL)
        data_dir = os.path.join(dl_dir, "blogs.csv")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_dir},
            ),
        ]
    def _generate_examples(self, filepath):
        """Yields examples."""
        with open(filepath, encoding="utf-8") as f:
            reader = csv.reader(f)
            for id_, row in enumerate(reader):
                # Skip the CSV header row.
                if id_ == 0:
                    continue
                yield id_, {"text": row[0]}
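
For language-modeling experiments, the raw posts can be tokenized before training. A minimal sketch, assuming the transformers library and the HooshvareLab/bert-fa-base-uncased tokenizer (the checkpoint name is an assumption; any tokenizer that covers Persian works):

from datasets import load_dataset
from transformers import AutoTokenizer

# Tokenizer checkpoint and repo id assumed for illustration.
tokenizer = AutoTokenizer.from_pretrained("HooshvareLab/bert-fa-base-uncased")
dataset = load_dataset("RohanAiLab/persian_blog", split="train")

# Tokenize each post; truncation keeps very long posts within the model's context length.
tokenized = dataset.map(
    lambda batch: tokenizer(batch["text"], truncation=True, max_length=512),
    batched=True,
    remove_columns=["text"],
)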