blumenstiel committed
Commit 4063393 · verified · 1 Parent(s): d254e9b

Update terramesh.py

Files changed (1): terramesh.py +84 -9
terramesh.py CHANGED
@@ -19,18 +19,20 @@
 import os
 import io
 import re
+
+import numpy
 import zarr
 import fsspec
 import itertools
 import braceexpand
 import numpy as np
+import albumentations
 import webdataset as wds
 from collections.abc import Callable, Iterable
 from sympy.printing.pytorch import torch
 from torch.utils.data._utils.collate import default_collate
 from webdataset.handlers import warn_and_continue
 
-
 # Definition of all shard files in TerraMesh
 split_files = {
     "ssl4eos12": {
@@ -56,7 +58,6 @@ def build_terramesh_dataset(
     batch_size: int = 8,
     *args, **kwargs,
 ):
-
     if len(modalities) == 1:
         # Build standard WebDataset for single modality
         dataset = build_wds_dataset(
@@ -172,12 +173,11 @@ def build_multimodal_dataset(
         lst.remove(value)
         return lst
 
-
     majortom_mod = f"[{','.join(filter_list(modalities, 'S1GRD'))}]"
     ssl4eos12_mod = f"[{','.join(filter_list(modalities, 'S1RTC'))}]"
 
     # Joins majortom and ssl4eos12 shard files with "::"
-    urls = (os.path.join(path, split, majortom_mod, split_files["majortom"][split][0])
+    urls = (os.path.join(path, split, majortom_mod, split_files["majortom"][split][0])
             + "::" + os.path.join(path, split, ssl4eos12_mod, split_files["ssl4eos12"][split][0]))
 
     dataset = build_datapipeline(urls, transform, batch_size, *args, **kwargs)
@@ -193,12 +193,12 @@ def build_datapipeline(urls, transform, batch_size, *args, **kwargs):
         wds.decode(zarr_decoder), # Decode from bytes to PIL images, numpy arrays, etc.
         wds.map(drop_time_dim), # Remove time dimension from tensors
         wds.map(remove_extensions), # Remove "file extensions" from dictionary keys
-        ( # Apply transformation
+        ( # Apply transformation
            wds.map(transform)
            if transform is not None
            else wds.map(identity)
        ),
-        ( # Batching
+        ( # Batching
            wds.batched(batch_size, collation_fn=default_collate, partial=False)
            if batch_size is not None
            else wds.map(identity)
@@ -240,8 +240,8 @@ def remove_extensions(sample):
 
 
 def multi_tarfile_samples(
-    src_iter: Iterable[dict],
-    handler: Callable[[Exception], bool] = warn_and_continue,
+    src_iter: Iterable[dict],
+    handler: Callable[[Exception], bool] = warn_and_continue,
 ):
     """
     This function is adapted from https://github.com/apple/ml-4m/blob/main/fourm/data/unified_datasets.py.
@@ -289,7 +289,7 @@ def multi_tarfile_samples(
        merged_dict["__url__"] = src["url"]
 
        for modality_name, modality_dict in zip(
-            modality_names, multi_tar_files
+            modality_names, multi_tar_files
        ):
            _key = modality_dict.pop("__key__")
            _url = modality_dict.pop("__url__")
@@ -315,3 +315,78 @@ def multi_tarfile_samples(
                continue
            else:
                break
+
+
+class Transpose(albumentations.ImageOnlyTransform):
+    """
+    Rearrange is a generic image transformation that reshapes an input tensor using a custom einops pattern.
+
+    This transform allows flexible reordering of tensor dimensions based on the provided pattern and arguments.
+    """
+
+    def __init__(self, axis: list):
+        """
+        Initialize the Transpose transform.
+
+        Args:
+            axis (list): Axis for numpy.transpose.
+        """
+        super().__init__(p=1)
+        self.axis = axis
+
+    def apply(self, img, **params):
+        return numpy.transpose(img, self.axis)
+
+    def get_transform_init_args_names(self):
+        return "transpose"
+
+
+def default_non_image_transform(array):
+    if hasattr(array, 'dtype') and (array.dtype == float or array.dtype == int):
+        return torch.from_numpy(array)
+    else:
+        return array
+
+
+class MultimodalTransforms:
+    """
+    MultimodalTransforms applies albumentations transforms to multiple image modalities.
+
+    This class supports both shared transformations across modalities and separate transformations for each modality.
+    It also handles non-image modalities by applying a specified non-image transform.
+
+    This code is adapted from https://github.com/IBM/terratorch/blob/main/terratorch/datasets/transforms.py.
+    """
+
+    def __init__(
+        self,
+        transforms: dict | albumentations.Compose,
+        shared: bool = True,
+        non_image_modalities: list[str] | None = None,
+        non_image_transforms: object | None = None,
+    ):
+        """
+        Initialize the MultimodalTransforms.
+
+        Args:
+            transforms (dict or A.Compose): The transformation(s) to apply to the data.
+            non_image_modalities (list[str] | None): List of keys corresponding to non-image modalities.
+            non_image_transforms (object | None): A transform to apply to non-image modalities.
+                If None, a default transform is used.
+        """
+        self.transforms = transforms
+        self.non_image_modalities = non_image_modalities or []
+        self.non_image_transforms = non_image_transforms or default_non_image_transform
+
+    def __call__(self, data: dict):
+        # albumentations requires a key 'image' and treats all other keys as additional targets
+        image_modality = [k for k in data.keys() if k not in self.non_image_modalities][0]
+        data['image'] = data.pop(image_modality)
+        data = self.transforms(**data)
+        data[image_modality] = data.pop('image')
+
+        # Process sequence data which is ignored by albumentations as 'global_label'
+        for modality in self.non_image_modalities:
+            data[modality] = self.non_image_transforms(data[modality])
+
+        return data
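For context, a minimal usage sketch of the newly added Transpose and MultimodalTransforms classes together with albumentations; the import path, modality keys, array shapes, and augmentation parameters below are illustrative assumptions, not part of the commit.

import albumentations as A
import numpy as np
from terramesh import MultimodalTransforms, Transpose  # assumed import path

# Hypothetical sample with two image modalities as H x W x C arrays (keys and shapes are assumptions).
sample = {
    "S1GRD": np.random.rand(264, 264, 2).astype(np.float32),
    "S1RTC": np.random.rand(264, 264, 2).astype(np.float32),
}

# MultimodalTransforms renames the first image key to 'image' before calling albumentations,
# so every other image modality must be registered as an additional target to receive the
# same spatial augmentation. Transpose(axis=[2, 0, 1]) moves the crops to channels-first.
transform = MultimodalTransforms(
    transforms=A.Compose(
        [
            A.RandomCrop(224, 224),
            A.HorizontalFlip(p=0.5),
            Transpose(axis=[2, 0, 1]),
        ],
        additional_targets={"S1RTC": "image"},
    ),
)

augmented = transform(sample)  # same keys, both arrays augmented identically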