# NOTE(review): removed non-Python extraction artifacts (file-size banner,
# commit-hash row, line-number gutter) that made this module unparseable.
import datasets
import pyarrow as pa
import pyarrow.parquet as pq
# Short, human-readable summary shown on the dataset page.
DESCRIPTION = "The dataset contains Airbnb data from 80 capitals and major cities all around the world."

# Raw (unsplit) parquet lives at:
# https://huggingface.co/datasets/kraina/airbnb_multicity/resolve/main/data/all_airbnb.parquet

# Top-level data directories available on the Hub.
DATA_DIRS = ["benchmark", "all"]

# h3 resolutions for which an official train/test split is published.
RESOLUTIONS = ["8", "9", "10"]
class AirbnbDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for the Airbnb multicity dataset.

    Args:
        data_url: mapping of split name (e.g. ``"train"``/``"test"``) to the
            URL of the parquet file backing that split.
        **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.
    """

    def __init__(self, data_url, **kwargs):
        super().__init__(**kwargs)
        self.data_url = data_url
class AirbnbDataset(datasets.ArrowBasedBuilder):
    """Arrow-based builder for the multicity Airbnb dataset.

    Configs named after an h3 resolution ("8", "9", "10") expose the official
    train/test split for that resolution; the "all" config exposes the raw,
    unsplit dataset as a single train split.
    """

    BUILDER_CONFIG_CLASS = AirbnbDatasetConfig
    DEFAULT_CONFIG_NAME = "8"

    # One config per published h3 resolution, each with a train and test file.
    BUILDER_CONFIGS = [
        AirbnbDatasetConfig(
            name=res,
            description=f"This is the official train test split for Airbnb Datatset in h3 resolution = {res}. Benchmark cities are: Paris, London, Rome, Melbourne, New York City, Amsterdam.",
            data_url={
                "train": f"https://huggingface.co/datasets/kraina/airbnb_multicity/resolve/main/data/res_{res}/airbnb_train.parquet",
                "test": f"https://huggingface.co/datasets/kraina/airbnb_multicity/resolve/main/data/res_{res}/airbnb_test.parquet",
            },
        )
        for res in RESOLUTIONS
    ]
    # Plus the raw, unsplit version of the data (train split only).
    BUILDER_CONFIGS = BUILDER_CONFIGS + [
        AirbnbDatasetConfig(
            name="all",
            # NOTE: the original had a placeholder-free f-string here; the
            # plain literal below is byte-identical at runtime.
            description="This is a raw, full version of Airbnb Dataset." + DESCRIPTION,
            data_url={"train": "https://huggingface.co/datasets/kraina/airbnb_multicity/resolve/main/data/all_airbnb.parquet"},
        )
    ]

    def _info(self):
        """Return the dataset metadata (description, homepage, feature schema)."""
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=self.config.description,
            homepage="https://insideairbnb.com/",
            citation="",
            # This defines the different columns of the dataset and their types.
            # Columns mirror the Inside Airbnb listing schema; dates are kept
            # as plain strings rather than parsed timestamps.
            features=datasets.Features(
                {
                    "id": datasets.Value(dtype="int64"),
                    "name": datasets.Value(dtype="string"),
                    "host_id": datasets.Value(dtype="int64"),
                    "host_name": datasets.Value(dtype="string"),
                    "latitude": datasets.Value(dtype="float64"),
                    "longitude": datasets.Value(dtype="float64"),
                    "neighbourhood": datasets.Value(dtype="string"),
                    "room_type": datasets.Value(dtype="string"),
                    "price": datasets.Value(dtype="float64"),
                    "minimum_nights": datasets.Value(dtype="int64"),
                    "number_of_reviews": datasets.Value(dtype="int64"),
                    "last_review": datasets.Value(dtype="string"),
                    "reviews_per_month": datasets.Value(dtype="float64"),
                    "calculated_host_listings_count": datasets.Value(dtype="int64"),
                    "availability_365": datasets.Value(dtype="int64"),
                    "number_of_reviews_ltm": datasets.Value(dtype="int64"),
                    "city": datasets.Value(dtype="string"),
                    "date": datasets.Value(dtype="string"),
                }
            ),
        )

    def _split_generators(self, dl_manager: datasets.download.DownloadManager):
        """Download the parquet file(s) for the active config and declare splits.

        The "all" config has only a train split; every resolution config has
        both a train and a test split.
        """
        downloaded_files = dl_manager.download(self.config.data_url)
        if self.config.name == "all":
            return [
                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            ]
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    def _generate_tables(self, filepath):
        """Yield ``(key, pyarrow.Table)`` pairs read batch-by-batch from one parquet file.

        Args:
            filepath: local path of a downloaded parquet file.
        """
        with open(filepath, mode="rb") as f:
            # BUG FIX: read from the already-open handle instead of reopening
            # the path — the original opened `f` and then never used it,
            # opening the file a second time via its path.
            parquet_file = pq.ParquetFile(source=f)
            for batch_idx, record_batch in enumerate(parquet_file.iter_batches()):
                # Round-trip through pandas to drop the parquet row index so
                # the emitted tables have a clean 0-based index per batch.
                df = record_batch.to_pandas()
                df.reset_index(drop=True, inplace=True)
                pa_table = pa.Table.from_pandas(df)
                yield f"{batch_idx}", pa_table