import json

import datasets


class SanskritWordSegmentation(datasets.GeneratorBasedBuilder):
    """Builder for gold-standard Sanskrit word segmentation data.

    Each example pairs a sentence with its gold segmentation, per-segment
    lemma/morph analyses, and the full lattice of morphological candidates.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return the dataset metadata: feature schema, homepage, citation."""
        return datasets.DatasetInfo(
            description="Gold-standard Sanskrit word segmentation and morphological candidate graphs.",
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "sentence": datasets.Value("string"),
                    "gold_segments": datasets.Sequence(datasets.Value("string")),
                    # One list of lemmas / morph tags per gold segment.
                    "lemmas": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
                    "morph_tags": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
                    # Candidate graph nodes: every analysis proposed for the
                    # sentence, including those not in the gold segmentation.
                    "candidates": datasets.Sequence(
                        {
                            "id": datasets.Value("string"),
                            "word": datasets.Value("string"),
                            "lemma": datasets.Value("string"),
                            "morph": datasets.Value("string"),
                            "cng": datasets.Value("string"),
                            "chunk_no": datasets.Value("string"),
                            "position": datasets.Value("int32"),
                            "length": datasets.Value("int32"),
                            "pre_verb": datasets.Value("string"),
                        }
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://zenodo.org/records/803508",
            citation="Krishna et al. (2017), A Dataset for Sanskrit Word Segmentation",
        )

    def _split_generators(self, dl_manager):
        """Resolve the data file and expose it as a single TRAIN split."""
        data_path = dl_manager.download_and_extract("data.jsonl")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_path},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from a JSON-lines file.

        Args:
            filepath: Path to a UTF-8 JSONL file, one example dict per line.

        Blank lines (e.g. a trailing newline at end of file) are skipped so
        they do not crash ``json.loads``.
        """
        with open(filepath, encoding="utf-8") as f:
            for idx, line in enumerate(f):
                line = line.strip()
                if not line:
                    continue
                yield idx, json.loads(line)