Update soybean_dataset.py
soybean_dataset.py  CHANGED  (+34 -16)
@@ -236,45 +236,63 @@ class SoybeanDataset(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["valid"]}),
         ]
 
-    def
+    def download_image(self, image_url):
+        try:
             response = requests.get(image_url)
-        response.raise_for_status()  # This will raise an exception
-
-        # Open the image from the downloaded bytes and return the PIL Image
+            response.raise_for_status()  # This will raise an exception for HTTP errors
             img = Image.open(BytesIO(response.content))
             return img
-
-
+        except requests.RequestException as e:
+            logging.error(f"Error downloading {image_url}: {e}")
+            return None
+
+    def download_images_concurrently(self, image_urls):
+        images = {}
+        with ThreadPoolExecutor(max_workers=5) as executor:
+            future_to_url = {executor.submit(self.download_image, url): url for url in image_urls}
+            for future in as_completed(future_to_url):
+                url = future_to_url[future]
+                try:
+                    image = future.result()
+                    images[url] = image
+                except Exception as exc:
+                    logging.error(f'{url} generated an exception: {exc}')
+        return images
 
     def _generate_examples(self, filepath):
-        #"""Yields examples as (key, example) tuples."""
         logging.info("generating examples from = %s", filepath)
 
+        image_urls = []
         with open(filepath, encoding="utf-8") as f:
             data = csv.DictReader(f)
+            for row in data:
+                image_urls.append(row['original_image'])
+                image_urls.append(row['segmentation_image'])
 
+        # Download all images concurrently
+        downloaded_images = self.download_images_concurrently(set(image_urls))  # Use set to avoid duplicate downloads
 
+        with open(filepath, encoding="utf-8") as f:
+            data = csv.DictReader(f)
             for row in data:
-                # Assuming the 'original_image' column has the full path to the image file
                 unique_id = row['unique_id']
                 original_image_path = row['original_image']
                 segmentation_image_path = row['segmentation_image']
                 sets = row['sets']
 
-                original_image =
-                segmentation_image =
+                original_image = downloaded_images.get(original_image_path)
+                segmentation_image = downloaded_images.get(segmentation_image_path)
+
+                if original_image is None or segmentation_image is None:
+                    logging.error(f"Missing image for {unique_id}")
+                    continue
 
-
-                # Here you need to replace 'initial_radius', 'final_radius', 'initial_angle', 'final_angle', 'target'
-                # with actual columns from your CSV or additional processing you need to do
-                yield row['unique_id'], {
+                yield unique_id, {
                     "unique_id": unique_id,
                     "sets": sets,
                     "original_image": original_image,
                     "segmentation_image": segmentation_image,
-                    # ... add other features if necessary
                 }
-
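The added methods rely on requests, PIL, BytesIO, and the concurrent.futures executor API, none of which appear in this hunk. A minimal sketch of the imports the script would need near its top, assuming they are not already present alongside the existing datasets, csv, and logging imports:

# Imports assumed by the changed methods; their placement at the top of
# soybean_dataset.py is an assumption, since this hunk starts at line 236.
import csv
import logging
from concurrent.futures import ThreadPoolExecutor, as_completed
from io import BytesIO

import datasets
import requests
from PIL import Image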
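For reference, a hedged usage sketch that exercises the script end to end; the local path "./soybean_dataset.py" and the split name are assumptions, and recent datasets releases may require trust_remote_code=True or may no longer support script-based loading:

from datasets import load_dataset

# Loading through the script runs _generate_examples, and therefore the
# new concurrent download path; path and split are placeholders.
ds = load_dataset("./soybean_dataset.py", split="train")
print(ds[0]["unique_id"], ds[0]["sets"])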