Update README.md
Browse files
README.md
CHANGED
@@ -102,22 +102,23 @@ instances["categories"]
|
|
102 |
## Build the dataset and upload to Hub
|
103 |
|
104 |
```py
|
105 |
-
!
|
106 |
-
!wget http://images.cocodataset.org/zips/val2017.zip
|
107 |
-
!wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip
|
108 |
|
109 |
-
|
110 |
-
!
|
111 |
-
!
|
|
|
|
|
|
|
|
|
|
|
112 |
|
113 |
import json
|
|
|
114 |
from pathlib import Path
|
115 |
from tqdm import tqdm
|
116 |
-
from
|
117 |
-
from datasets import Dataset, DatasetDict, Features, Value, Sequence, Array2D
|
118 |
-
import shutil
|
119 |
|
120 |
-
# === Paths ===
|
121 |
base_dir = Path("/content")
|
122 |
splits = {
|
123 |
"train": {
|
@@ -130,11 +131,15 @@ splits = {
|
|
130 |
}
|
131 |
}
|
132 |
output_dir = base_dir / "coco_imagefolder"
|
|
|
133 |
|
134 |
-
|
135 |
-
if
|
136 |
-
|
137 |
-
|
|
|
|
|
|
|
138 |
|
139 |
def convert_coco_to_jsonl(image_dir, annotation_path, output_metadata_path):
|
140 |
with open(annotation_path) as f:
|
@@ -145,42 +150,46 @@ def convert_coco_to_jsonl(image_dir, annotation_path, output_metadata_path):
|
|
145 |
|
146 |
for ann in data['annotations']:
|
147 |
img_id = ann['image_id']
|
148 |
-
bbox = ann['bbox']
|
149 |
category = ann['category_id']
|
|
|
|
|
|
|
|
|
150 |
|
151 |
if img_id not in annotations_by_image:
|
152 |
annotations_by_image[img_id] = {
|
153 |
"file_name": id_to_filename[img_id],
|
154 |
"objects": {
|
155 |
"bbox": [],
|
156 |
-
"
|
|
|
157 |
}
|
158 |
}
|
159 |
|
160 |
annotations_by_image[img_id]["objects"]["bbox"].append(bbox)
|
|
|
161 |
annotations_by_image[img_id]["objects"]["categories"].append(category)
|
162 |
|
163 |
with open(output_metadata_path, "w") as f:
|
164 |
-
for
|
165 |
json.dump(metadata, f)
|
166 |
f.write("\n")
|
167 |
|
168 |
-
#
|
169 |
for split, info in splits.items():
|
170 |
split_dir = output_dir / split
|
171 |
-
split_dir.mkdir(parents=True)
|
172 |
|
173 |
# Copy images
|
174 |
for img_path in tqdm(info["image_dir"].glob("*.jpg"), desc=f"Copying {split} images"):
|
175 |
shutil.copy(img_path, split_dir / img_path.name)
|
176 |
|
177 |
-
#
|
178 |
metadata_path = split_dir / "metadata.jsonl"
|
179 |
convert_coco_to_jsonl(split_dir, info["annotation_file"], metadata_path)
|
180 |
|
181 |
-
#
|
182 |
-
|
183 |
-
|
184 |
-
dataset = load_dataset("imagefolder", data_dir="/content/coco_imagefolder")
|
185 |
dataset.push_to_hub("ariG23498/coco2017")
|
186 |
```
|
|
|
102 |
## Build the dataset and upload to Hub
|
103 |
|
104 |
```py
|
105 |
+
# Install the latest `datasets` release (quiet).
!pip install -U -q datasets

# Download and unzip COCO 2017
# NOTE(review): train2017.zip is ~18 GB — assumes a Colab-sized disk; confirm.
!wget -q http://images.cocodataset.org/zips/train2017.zip
!wget -q http://images.cocodataset.org/zips/val2017.zip
!wget -q http://images.cocodataset.org/annotations/annotations_trainval2017.zip

# Extract images and annotations into the current working directory (/content on Colab).
!unzip -q train2017.zip
!unzip -q val2017.zip
!unzip -q annotations_trainval2017.zip
|
115 |
|
116 |
import json
|
117 |
+
import shutil
|
118 |
from pathlib import Path
|
119 |
from tqdm import tqdm
|
120 |
+
from datasets import load_dataset
|
|
|
|
|
121 |
|
|
|
122 |
base_dir = Path("/content")
|
123 |
splits = {
|
124 |
"train": {
|
|
|
131 |
}
|
132 |
}
|
133 |
output_dir = base_dir / "coco_imagefolder"
|
134 |
+
output_dir.mkdir(parents=True, exist_ok=True)
|
135 |
|
136 |
+
def normalize_segmentation(segmentation):
    """Coerce a COCO ``segmentation`` field into a list of polygons.

    COCO stores polygon segmentations either as a list of polygons
    (each a flat coordinate list) or, occasionally, as one bare flat
    coordinate list; RLE segmentations are dicts.

    Returns a list of polygons, or ``[]`` when the value is RLE or
    malformed so the caller can skip the annotation.
    """
    # RLE dicts (and any other non-list value) are skipped.
    if not isinstance(segmentation, list):
        return []
    # Already a list of polygons — pass through unchanged.
    if all(isinstance(part, list) for part in segmentation):
        return segmentation
    # A single flat polygon — wrap it so the output shape is uniform.
    if all(isinstance(coord, (int, float)) for coord in segmentation):
        return [segmentation]
    # Mixed / malformed content.
    return []
|
143 |
|
144 |
def convert_coco_to_jsonl(image_dir, annotation_path, output_metadata_path):
|
145 |
with open(annotation_path) as f:
|
|
|
150 |
|
151 |
for ann in data['annotations']:
|
152 |
img_id = ann['image_id']
|
153 |
+
bbox = ann['bbox']
|
154 |
category = ann['category_id']
|
155 |
+
segmentation = normalize_segmentation(ann['segmentation'])
|
156 |
+
|
157 |
+
if not segmentation:
|
158 |
+
continue # skip if malformed or RLE
|
159 |
|
160 |
if img_id not in annotations_by_image:
|
161 |
annotations_by_image[img_id] = {
|
162 |
"file_name": id_to_filename[img_id],
|
163 |
"objects": {
|
164 |
"bbox": [],
|
165 |
+
"segmentation": [],
|
166 |
+
"categories": [],
|
167 |
}
|
168 |
}
|
169 |
|
170 |
annotations_by_image[img_id]["objects"]["bbox"].append(bbox)
|
171 |
+
annotations_by_image[img_id]["objects"]["segmentation"].append(segmentation)
|
172 |
annotations_by_image[img_id]["objects"]["categories"].append(category)
|
173 |
|
174 |
with open(output_metadata_path, "w") as f:
|
175 |
+
for metadata in annotations_by_image.values():
|
176 |
json.dump(metadata, f)
|
177 |
f.write("\n")
|
178 |
|
179 |
+
# Build the imagefolder layout: one directory per split, each holding its
# images plus a metadata.jsonl that the `imagefolder` loader picks up.
for split_name, split_info in splits.items():
    target_dir = output_dir / split_name
    target_dir.mkdir(parents=True, exist_ok=True)

    # Copy images
    image_paths = split_info["image_dir"].glob("*.jpg")
    for image_path in tqdm(image_paths, desc=f"Copying {split_name} images"):
        shutil.copy(image_path, target_dir / image_path.name)

    # Write JSONL metadata
    convert_coco_to_jsonl(target_dir, split_info["annotation_file"], target_dir / "metadata.jsonl")

# Load the imagefolder dataset and push it to the Hub.
dataset = load_dataset("imagefolder", data_dir=str(output_dir))
dataset.push_to_hub("ariG23498/coco2017")
|
195 |
```
|