import os

import pandas as pd
from sklearn.model_selection import train_test_split
from datasets import Dataset, DatasetDict

# Define the directory to save Parquet files
parquet_dir = "./dataset_parquet"

# Create the directory if it doesn't exist
os.makedirs(parquet_dir, exist_ok=True)

# Load the source data into a pandas DataFrame
# (the file is tab-delimited despite its .csv extension)
df = pd.read_csv("data-final.csv", delimiter='\t')

# Split the DataFrame 60/20/20: hold out 40% first, then split the holdout
# evenly into validation and test sets
train_df, temp_df = train_test_split(df, test_size=0.4, random_state=42)
val_df, test_df = train_test_split(temp_df, test_size=0.5, random_state=42)
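# (Hypothetical variant: for a classification target, passing
# stratify=df["label"] to the first train_test_split call would keep class
# proportions consistent across splits; "label" is a placeholder column name.)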

# Convert the pandas DataFrames to Hugging Face Datasets;
# preserve_index=False stops the pandas index from being carried along
# as a spurious __index_level_0__ column in the saved files
train_dataset = Dataset.from_pandas(train_df, preserve_index=False)
val_dataset = Dataset.from_pandas(val_df, preserve_index=False)
test_dataset = Dataset.from_pandas(test_df, preserve_index=False)

# Create a DatasetDict
dataset_dict = DatasetDict({
    "train": train_dataset,
    "validation": val_dataset,
    "test": test_dataset
})
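# (Design note: DatasetDict.save_to_disk() would also persist every split,
# but in the library's Arrow cache layout; Parquet is written below instead
# for portability.)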

# Save each split as a Parquet file using the Dataset's built-in writer,
# avoiding a redundant round-trip through pandas and pyarrow
for split_name, dataset in dataset_dict.items():
    dataset.to_parquet(os.path.join(parquet_dir, f"{split_name}.parquet"))

print("Dataset splits saved as Parquet files.")