# airbnb_multicity / airbnb_multicity.py
# NOTE: the lines below are Hugging Face web-UI residue (author avatar caption,
# commit message, and commit hash) that leaked into the file; kept as comments
# so the module stays importable:
#   moska's picture
#   changed names with resolutions to just numbers instead of res_number
#   806ae69
import datasets
import pyarrow as pa
import pyarrow.parquet as pq
# Human-readable summary shown on the dataset card.
DESCRIPTION = (
    "The dataset contains Airbnb data from 80 capitals and major cities "
    "all around the world."
)
# Legacy single-file URL, superseded by the per-resolution layout below:
# https://huggingface.co/datasets/kraina/airbnb_multicity/resolve/main/data/all_airbnb.parquet
# Top-level data directories available in the repository.
DATA_DIRS = ["benchmark", "all"]
# Supported H3 spatial-index resolutions; each gets its own builder config.
RESOLUTIONS = ["8", "9", "10"]
class AirbnbDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig that additionally carries the parquet download URL(s).

    Args:
        data_url: mapping from split name (e.g. "train"/"test") to the URL of
            the parquet file backing that split.
        **kwargs: forwarded verbatim to ``datasets.BuilderConfig``.
    """

    def __init__(self, data_url, **kwargs):
        super().__init__(**kwargs)
        # Consumed later when the builder resolves its splits.
        self.data_url = data_url
class AirbnbDataset(datasets.ArrowBasedBuilder):
    """Multi-city Airbnb listings dataset builder.

    Configs:
        "8" / "9" / "10": the official train/test split at that H3 resolution.
        "all": the raw, unsplit dataset (train split only).
    """

    BUILDER_CONFIG_CLASS = AirbnbDatasetConfig
    DEFAULT_CONFIG_NAME = "8"
    # One config per H3 resolution (train + test), plus the raw "all" config.
    BUILDER_CONFIGS = [
        AirbnbDatasetConfig(
            name=res,
            # FIX: "Datatset" typo corrected to "Dataset" in the user-facing text.
            description=f"This is the official train test split for Airbnb Dataset in h3 resolution = {res}. Benchmark cities are: Paris, London, Rome, Melbourne, New York City, Amsterdam.",
            data_url={
                "train": f"https://huggingface.co/datasets/kraina/airbnb_multicity/resolve/main/data/res_{res}/airbnb_train.parquet",
                "test": f"https://huggingface.co/datasets/kraina/airbnb_multicity/resolve/main/data/res_{res}/airbnb_test.parquet",
            },
        )
        for res in RESOLUTIONS
    ] + [
        AirbnbDatasetConfig(
            name="all",
            # Plain string (the original used an f-string with no placeholders).
            description="This is a raw, full version of Airbnb Dataset." + DESCRIPTION,
            data_url={"train": "https://huggingface.co/datasets/kraina/airbnb_multicity/resolve/main/data/all_airbnb.parquet"},
        )
    ]

    def _info(self):
        """Return dataset metadata: description, homepage and column schema."""
        return datasets.DatasetInfo(
            # Description shown on the datasets page for the selected config.
            description=self.config.description,
            homepage="https://insideairbnb.com/",
            citation="",
            # Column schema shared by every config and split.
            features=datasets.Features(
                {
                    "id": datasets.Value(dtype="int64"),
                    "name": datasets.Value(dtype="string"),
                    "host_id": datasets.Value(dtype="int64"),
                    "host_name": datasets.Value(dtype="string"),
                    "latitude": datasets.Value(dtype="float64"),
                    "longitude": datasets.Value(dtype="float64"),
                    "neighbourhood": datasets.Value(dtype="string"),
                    "room_type": datasets.Value(dtype="string"),
                    "price": datasets.Value(dtype="float64"),
                    "minimum_nights": datasets.Value(dtype="int64"),
                    "number_of_reviews": datasets.Value(dtype="int64"),
                    "last_review": datasets.Value(dtype="string"),
                    "reviews_per_month": datasets.Value(dtype="float64"),
                    "calculated_host_listings_count": datasets.Value(dtype="int64"),
                    "availability_365": datasets.Value(dtype="int64"),
                    "number_of_reviews_ltm": datasets.Value(dtype="int64"),
                    "city": datasets.Value(dtype="string"),
                    "date": datasets.Value(dtype="string"),
                }
            ),
        )

    def _split_generators(self, dl_manager: datasets.download.DownloadManager):
        """Download the parquet file(s) and declare the available splits.

        The "all" config ships only a train file; every resolution config
        ships both train and test.
        """
        downloaded_files = dl_manager.download(self.config.data_url)
        splits = [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            )
        ]
        if self.config.name != "all":
            splits.append(
                datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    gen_kwargs={"filepath": downloaded_files["test"]},
                )
            )
        return splits

    def _generate_tables(self, filepath):
        """Yield ``(key, pyarrow.Table)`` pairs, one per parquet record batch.

        Args:
            filepath: local path of a downloaded parquet file (unpacked from
                ``gen_kwargs`` as set in ``_split_generators``).
        """
        with open(filepath, mode="rb") as f:
            # BUG FIX: the original opened ``f`` but passed ``filepath`` to
            # ParquetFile, leaving the handle unused; read from ``f`` instead.
            parquet_file = pq.ParquetFile(source=f)
            for batch_idx, record_batch in enumerate(parquet_file.iter_batches()):
                # Wrap each batch directly — skips the pandas round trip (and
                # the DataFrame index the original had to drop again).
                yield f"{batch_idx}", pa.Table.from_batches([record_batch])