SaylorTwift (HF Staff) committed
Commit 9c77183 · verified · 1 parent: c57cd54

Delete loading script

Files changed (1):
  1. lsat_qa.py (+0, -87)
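
With the loading script removed, the dataset is presumably resolved directly from its data files by the Hub's built-in loaders rather than by custom Python code. A minimal usage sketch, assuming the repository id lighteval/lsat_qa (not stated in this commit) and the config names declared in the deleted script (all, assignment, grouping, miscellaneous, ordering):

from datasets import load_dataset

# The repository id below is an assumption; the config names come from the deleted script.
lsat = load_dataset("lighteval/lsat_qa", "all", split="test")
print(lsat.features)          # passage, question, references, gold_index
print(lsat[0]["question"])
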
lsat_qa.py DELETED
@@ -1,87 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """Covid Dialog dataset in English and Chinese"""
-
-
- import copy
- import os
- import re
- import textwrap
- import json
-
- import datasets
-
-
- # BibTeX citation
- _CITATION = """
- """
-
- # Official description of the dataset
- _DESCRIPTION = textwrap.dedent(
-     """
-     """
- )
-
- # Link to an official homepage for the dataset here
- _HOMEPAGE = ""
-
- _LICENSE = ""
-
-
- import datasets
- import os
- import json
-
- names = ["all", "assignment", "grouping", "miscellaneous", "ordering"]
-
- class LsatQA(datasets.GeneratorBasedBuilder):
-     VERSION = datasets.Version("1.0.0")
-
-     BUILDER_CONFIGS = [datasets.BuilderConfig(name=name, version=datasets.Version("1.0.0"), description=_DESCRIPTION) for name in names]
-
-     def _info(self):
-         features = datasets.Features(
-             {
-                 "passage": datasets.Value("string"),
-                 "question": datasets.Value("string"),
-                 "references": datasets.Sequence(datasets.Value("string")),
-                 "gold_index": datasets.Value("int64"),
-             }
-         )
-         return datasets.DatasetInfo(
-             description=f"LSAT QA dataset, as preprocessed and shuffled in HELM",
-             features=features,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         test = dl_manager.download(os.path.join(self.config.name, "test.jsonl"))
-         train = dl_manager.download(os.path.join(self.config.name, "train.jsonl"))
-         val = dl_manager.download(os.path.join(self.config.name, "valid.jsonl"))
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={"file": test},
-             )
-         ]
-
-     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
-     def _generate_examples(self, file):
-         with open(file, encoding="utf-8") as f:
-             for ix, line in enumerate(f):
-                 yield ix, json.loads(line)
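
For reference, each record the deleted script yielded was one raw JSON line whose fields match the features declared in _info() (passage, question, references, gold_index). A minimal sketch of reading one of the per-config files directly, assuming a hypothetical local copy of all/test.jsonl:

import json

# Hypothetical local path; the script downloaded "<config name>/test.jsonl" for each config.
path = "all/test.jsonl"

with open(path, encoding="utf-8") as f:
    for ix, line in enumerate(f):
        example = json.loads(line)
        passage = example["passage"]        # passage text (string)
        question = example["question"]      # question stem (string)
        references = example["references"]  # candidate answer strings
        gold_index = example["gold_index"]  # index of the correct answer in references
        if ix == 0:
            print(question)
            print(references[gold_index])
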