blumenstiel committed on
Commit 9453ae9 · verified · 1 Parent(s): a8410cb

Create terramesh.py

Files changed (1)
  1. terramesh.py +317 -0
terramesh.py ADDED
@@ -0,0 +1,317 @@
# Copyright 2025 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file includes code adapted from the original work by EPFL and Apple Inc.,
# licensed under the Apache License, Version 2.0.
# Source: https://github.com/apple/ml-4m/

import os
import io
import re
import itertools
from collections.abc import Callable, Iterable

import braceexpand
import fsspec
import numpy as np
import torch
import webdataset as wds
import zarr
from torch.utils.data._utils.collate import default_collate
from webdataset.handlers import warn_and_continue

# Definition of all shard files in TerraMesh
split_files = {
    "ssl4eos12": {
        "train": ["ssl4eos12_shard_{000794..000889}.tar"],
        "val": ["ssl4eos12_shard_000009.tar"],
    },
    "majortom": {
        "train": ["majortom_shard_{000001..000793}.tar"],
        "val": ["majortom_shard_{000001..000008}.tar"],
    },
    "combined": {
        "train": ["majortom_shard_{000001..000793}.tar", "ssl4eos12_shard_{000794..000889}.tar"],
        "val": ["majortom_shard_{000001..000008}.tar", "ssl4eos12_shard_000009.tar"],
    },
}


def build_terramesh_dataset(
    path: str = "https://huggingface.co/datasets/ibm-esa-geospatial/TerraMesh/resolve/main/",
    modalities: list[str] | None = None,
    split: str = "val",
    urls: str | None = None,
    batch_size: int = 8,
    *args, **kwargs,
):
    if modalities is None:
        # Fall back to the single default modality used elsewhere in this file
        modalities = ["S2L2A"]

    if len(modalities) == 1:
        # Build standard WebDataset for a single modality
        dataset = build_wds_dataset(
            path=path,
            modality=modalities[0],
            split=split,
            urls=urls,
            batch_size=batch_size,
            *args, **kwargs,
        )
    else:
        # Build custom multi-modal dataset
        dataset = build_multimodal_dataset(
            path=path,
            modalities=modalities,
            split=split,
            urls=urls,
            batch_size=batch_size,
            *args, **kwargs,
        )
    return dataset
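
# Example usage (illustrative sketch): build the multi-modal validation split and
# draw one batch. Wrapping the pipeline in a DataLoader with batch_size=None is the
# usual WebDataset pattern, since batching already happens inside the pipeline.
#
#   from torch.utils.data import DataLoader
#
#   dataset = build_terramesh_dataset(modalities=["S2L2A", "S1RTC"], split="val", batch_size=8)
#   loader = DataLoader(dataset, batch_size=None, num_workers=4)
#   batch = next(iter(loader))  # dict with one tensor per modality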


def zarr_decoder(key, value):
    """Decode zipped zarr files (keys ending in ".zarr.zip") into numpy arrays."""
    if key == "zarr.zip" or key.endswith(".zarr.zip"):
        # Open the zipped zarr store directly from the in-memory bytes
        mapper = fsspec.filesystem("zip", fo=io.BytesIO(value), block_size=None).get_mapper("")
        # Return the "bands" array as a numpy array
        return zarr.open_consolidated(mapper, mode="r")["bands"][...]
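
# Sketch of the decoder in isolation (assumes a local "*.zarr.zip" sample file;
# the time-first shape is an assumption based on drop_time_dim below):
#
#   with open("sample.zarr.zip", "rb") as f:
#       bands = zarr_decoder("S2L2A.zarr.zip", f.read())  # e.g. array of shape [T, C, H, W]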


def identity(sample):
    """Identity function that does nothing."""
    return sample


def drop_time_dim(value, dim: int = 0):
    """
    Remove the (singleton) time dimension from data tensors,
    e.g. an array of shape [1, C, H, W] becomes [C, H, W].
    """
    if isinstance(value, (np.ndarray, torch.Tensor)):
        return value.squeeze(dim)
    elif isinstance(value, dict):
        for k, v in value.items():
            if isinstance(v, (np.ndarray, torch.Tensor)):
                value[k] = v.squeeze(dim)
        return value
    # Pass other values through unchanged
    return value


def build_wds_dataset(
    path: str = "https://huggingface.co/datasets/ibm-esa-geospatial/TerraMesh/resolve/main/",
    modality: str = "S2L2A",
    split: str = "val",
    urls: str | None = None,
    batch_size: int = 8,
    transform: Callable | None = None,
    *args, **kwargs,
):
    if urls is None:
        # Select split files (S1GRD only exists in ssl4eos12, S1RTC only in majortom)
        if modality == "S1GRD":
            files = split_files["ssl4eos12"][split]
        elif modality == "S1RTC":
            files = split_files["majortom"][split]
        else:
            files = split_files["combined"][split]

        # Join majortom and ssl4eos12 shard files with "::" (except for S-1 modalities)
        urls = "::".join(
            [os.path.join(path, split, modality, f) for f in files]
        )

    kwargs["shardshuffle"] = kwargs.get("shardshuffle", 100)  # Shuffle shards by default

    # Build dataset
    dataset = (
        wds.WebDataset(urls, *args, **kwargs)
        .decode(zarr_decoder)  # Decode byte files
        .rename(image="zarr.zip")  # Rename the decoded data to "image"
        .map_dict(image=drop_time_dim)  # Remove temporal dimension
    )

    if transform is not None:
        dataset = dataset.map(transform)

    # Create batches
    if batch_size is not None:
        dataset = dataset.batched(batch_size)

    return dataset

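# Note on the URL string (illustrative): for the default "combined" val split of S2L2A,
# the joined value of `urls` expands to roughly
#   ".../val/S2L2A/majortom_shard_{000001..000008}.tar::.../val/S2L2A/ssl4eos12_shard_000009.tar"
# WebDataset splits shard lists on "::" and brace-expands each part into individual shard URLs.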


def combine_datasets(*args):
    """Chain multiple iterable datasets one after another."""
    return itertools.chain(*args)


def build_multimodal_dataset(
    path: str = "https://huggingface.co/datasets/ibm-esa-geospatial/TerraMesh/resolve/main/",
    modalities: list[str] = ["S2L2A"],
    split: str = "val",
    urls: str | None = None,
    batch_size: int = 8,
    transform: Callable | None = None,
    *args, **kwargs,
):
    if urls is None:
        # Filter modalities based on availability (S1GRD and S1RTC are not present in all subsets)
        def filter_list(lst, value):
            # Helper function to remove a modality if present
            lst = list(lst)
            if value in lst:
                lst.remove(value)
            return lst

        majortom_mod = f"[{','.join(filter_list(modalities, 'S1GRD'))}]"
        ssl4eos12_mod = f"[{','.join(filter_list(modalities, 'S1RTC'))}]"

        # Join majortom and ssl4eos12 shard files with "::"
        urls = (os.path.join(path, split, majortom_mod, split_files["majortom"][split][0])
                + "::" + os.path.join(path, split, ssl4eos12_mod, split_files["ssl4eos12"][split][0]))

    dataset = build_datapipeline(urls, transform, batch_size, *args, **kwargs)
    return dataset

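# Illustrative example of the generated multi-modal URL pattern: with
# modalities=["S2L2A", "S1RTC", "S1GRD"], `urls` becomes roughly
#   ".../val/[S2L2A,S1RTC]/majortom_shard_{000001..000008}.tar::.../val/[S2L2A,S1GRD]/ssl4eos12_shard_000009.tar"
# i.e. S1GRD is dropped from the majortom part and S1RTC from the ssl4eos12 part.
# The square braces are translated to curly braces and brace-expanded inside
# multi_tarfile_samples, so each modality is read from its own tar file.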


def build_datapipeline(urls, transform, batch_size, *args, **kwargs):
    datapipeline = wds.DataPipeline(
        # Infinitely sample shards from the shard list with replacement. Each worker is seeded independently.
        wds.ResampledShards(urls),
        multi_tarfile_samples,  # Extract individual samples from multi-modal tar files
        wds.shuffle(100),  # Shuffle with a buffer of given size
        wds.decode(zarr_decoder),  # Decode zipped zarr byte files into numpy arrays
        wds.map(drop_time_dim),  # Remove time dimension from tensors
        wds.map(remove_extensions),  # Remove "file extensions" from dictionary keys
        (  # Apply transformation
            wds.map(transform)
            if transform is not None
            else wds.map(identity)
        ),
        (  # Batching
            wds.batched(batch_size, collation_fn=default_collate, partial=False)
            if batch_size is not None
            else wds.map(identity)
        ),
    )
    return datapipeline


def extract_modality_names(s):
    """
    Function from https://github.com/apple/ml-4m/blob/main/fourm/data/unified_datasets.py.
    """
    # Regular expression pattern to match anything enclosed in '{' and '}', comma separated
    pattern = r"\{([^}]*)\}"
    match = re.search(pattern, s)
    return match.group(1).split(",") if match else []

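# For example (hypothetical paths):
#   extract_modality_names("val/{S2L2A,S1RTC}/shard_000001.tar")  ->  ["S2L2A", "S1RTC"]
#   extract_modality_names("val/S2L2A/shard_000001.tar")          ->  []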


def remove_ext_with_gz(s):
    """
    Function from https://github.com/apple/ml-4m/blob/main/fourm/data/unified_datasets.py.
    """
    if s.endswith(".gz"):
        s = s.replace(".gz", "")
    if s.endswith(".zip"):
        s = s.replace(".zip", "")
    return os.path.splitext(s)[0]


def remove_extensions(sample):
    """
    Function from https://github.com/apple/ml-4m/blob/main/fourm/data/unified_datasets.py.

    In webdatasets, we identify the type of a given modality by adding an extension
    in the form f"{modality_name}.{modality_extension}", e.g. "rgb.jpg" or "caption.json".
    This function removes them and returns a dictionary of {f"{modality_name}": modality}.
    """
    return {remove_ext_with_gz(k): v for k, v in sample.items()}


def multi_tarfile_samples(
    src_iter: Iterable[dict],
    handler: Callable[[Exception], bool] = warn_and_continue,
):
    """
    This function is adapted from https://github.com/apple/ml-4m/blob/main/fourm/data/unified_datasets.py.

    WebDataset does not support splitting up shards by modality, so we need to do this manually.
    Usually, we would need to save all modalities in the same tar file, e.g. shard_root_train/{00000..12345}.tar,
    where each shard contains 1000 samples and each sample contains all modalities.
    This is not flexible when adding new modalities, so we instead save each modality in a separate tar file,
    e.g. shard_root_train_rgb/{00000..12345}.tar, shard_root_train_caption/{00000..12345}.tar, etc., where each
    shard again contains 1000 samples, but each sample contains only one modality. All samples in all shards
    have to be aligned.

    This function takes an iterator over shard URLs, where we use brace expansion to specify multiple tar files
    per modality. E.g. shard_root_train_[rgb,caption]/00123.tar will be expanded to shard_root_train_rgb/00123.tar
    and shard_root_train_caption/00123.tar, and the samples from these two tar files will be combined into a
    single sample.

    Args:
        src_iter: Iterator over shards that *already brace expanded the shard numbers*,
            e.g. {'url': 'shard_root_train_[rgb,caption]/00000.tar'}, {'url': 'shard_root_train_[rgb,caption]/00001.tar'}, ...
            This function also works when no square braces for multiple modalities are used, e.g.
            {'url': 'shard_root_train/00000.tar'}, ... It can be a drop-in replacement for wds.tarfile_samples.
        handler: Function that handles exceptions. If it returns True, the shard is skipped. If it returns False,
            the function exits.

    Yields:
        Dictionary of aligned samples from all modalities.
    """
    for src in src_iter:

        # Multi tar file URLs use brace expansion with square braces
        multi_tar_urls = src["url"].translate(str.maketrans("[]", "{}"))
        modality_names = extract_modality_names(multi_tar_urls)
        # Fall back to a single unnamed modality when no square braces are used, so that
        # plain shard URLs still yield their samples (see the `modality_name is None` branch below)
        modality_names = modality_names or [None]
        multi_tar_urls = list(braceexpand.braceexpand(multi_tar_urls))

        # Create tar iterators for shards of all modalities
        tar_iters = [
            wds.tarfile_samples([{"url": tar_url}]) for tar_url in multi_tar_urls
        ]

        try:
            # Loop over these iterators in parallel and combine the tar files from different modalities
            for multi_tar_files in zip(*tar_iters):

                merged_dict = {}
                merged_dict["__key__"] = multi_tar_files[0]["__key__"]
                merged_dict["__url__"] = src["url"]

                for modality_name, modality_dict in zip(modality_names, multi_tar_files):
                    _key = modality_dict.pop("__key__")
                    _url = modality_dict.pop("__url__")

                    if _key != merged_dict["__key__"]:
                        raise ValueError(
                            f"Divergence detected! Trying to merge keys {_key} of {modality_name} and "
                            f"{merged_dict['__key__']} of merged_dict with modalities {merged_dict.keys()}."
                        )

                    for k, v in modality_dict.items():
                        if modality_name is None:
                            merged_dict[k] = v
                        else:
                            merged_dict[f"{modality_name}.{k}"] = v

                yield merged_dict

        except Exception as e:
            print(e)
            print(f"Exception occurred while processing {src['url']}.")
            if handler(e):
                print("Skipping shard...")
                continue
            else:
                break
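
# Sketch of the merged output (illustrative, with assumed shard/key names): given aligned
# shards for S2L2A and S1RTC, each yielded sample looks roughly like
#   {"__key__": "sample_000042",
#    "__url__": ".../val/[S2L2A,S1RTC]/majortom_shard_000001.tar",
#    "S2L2A.zarr.zip": <bytes>, "S1RTC.zarr.zip": <bytes>}
# Downstream, wds.decode(zarr_decoder) turns the byte values into arrays and
# remove_extensions strips ".zarr.zip" so batch keys are just the modality names.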