Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-995920d4-3066-4bd1-985c-53b12cb9e83c1753010233944-2025_07_20-13.18.05.231/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-036b3473-9f9d-44a0-a906-2567459f706c1754119162409-2025_08_02-09.19.29.910/source.csv +119 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-0f6ed796-3fd2-4eb6-ad79-777c3b4353711750804480941-2025_06_25-00.35.32.827/source.csv +215 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-0f7f1a2c-8092-4a29-81e5-3d0f406b88711751465682686-2025_07_02-16.15.28.930/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-207fe3e3-7fd2-432d-a410-a7a943195e5f1753557295596-2025_07_26-21.15.03.812/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-24737a5e-b7f6-491e-94c6-0c20304cd1e41754227167268-2025_08_03-15.19.34.553/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-372ef62d-2075-43ff-ac1a-e2025fd873c41751612450082-2025_07_04-09.01.47.125/source.csv +47 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-3e3ce02e-664a-4f58-9d7f-0f56e32c7def1753363875204-2025_07_24-15.31.23.202/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-46148775-4197-4774-bf77-8631ca6b73f01753557591807-2025_07_26-21.19.58.968/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-5d37fb4d-73be-43f4-bdda-1c3c7db3bdf31752589529764-2025_07_15-16.25.37.55/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-659ce403-f41d-4217-a14b-978086650bc21753384200795-2025_07_24-21.10.09.978/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-6da126cd-f641-4a15-bc10-f51c6b432fda1753788630792-2025_07_29-13.30.37.18/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-7879e034-f897-48e8-8481-1a87a73b0dc81752135543307-2025_07_10-10.19.09.565/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-803c0497-2cfb-4591-a2cb-bac49e1c774c1751564777714-2025_07_03-19.47.11.372/source.csv +141 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-90a7d2df-8655-400e-8729-5616e02268171751547517319-2025_07_03-14.58.47.421/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-9899324d-b79d-45d0-8d10-efdb2a606e141753968905455-2025_07_31-15.35.12.899/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-9e1e9a37-9c93-49f5-86bb-957e61f072951753546714480-2025_07_26-18.18.42.334/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-a06af857-5252-41ea-b944-6fb276580a331751465854969-2025_07_02-16.18.54.752/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-a5280753-495e-4391-8448-c3c5679e94b01753346266343-2025_07_24-10.37.53.687/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-ba0d66ec-6cdf-4899-b3c6-5e02420596081753426538563-2025_07_25-08.55.50.825/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-c0810100-7d8e-4105-bf12-84868f66800c1753216757462-2025_07_22-22.39.25.825/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-ccd47d51-aa58-4f87-bfea-141fbcfe923f1754059235262-2025_08_01-16.40.40.908/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-f2478f34-0247-46ae-b2bf-367520171d271754324711637-2025_08_04-18.25.17.542/source.csv +4 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-f493ee6f-fdfb-4a84-bc4c-0358523e54001754136495510-2025_08_02-14.08.22.904/source.csv +6 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-f4d5bf7f-03bc-460b-912a-a4218449896f1754112326764-2025_08_02-07.25.34.900/source.csv +0 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-172d798b-bee8-455a-b904-9dd3fe6387d51754411154298-2025_08_05-18.25.56.221/source.csv +0 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-222fe98e-29ac-4b20-9a65-fe2e31f8eb701751128122769-2025_06_28-09.28.47.536/source.csv +29 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-29e2cbae-7056-4585-b457-f48bd451c3fd1750644341589-2025_06_22-19.05.43.270/source.csv +0 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-2bb200ce-4bc8-4bc3-9354-29e24db5d38e1752063967983-2025_07_09-14.26.42.463/source.csv +0 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-48fbb415-6db9-4d35-b548-561e828791bf1751383187013-2025_07_01-17.19.57.60/source.csv +4 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-53035602-cd5a-4dad-bc79-2cb4d8d4f7681751162692203-2025_06_28-19.04.53.413/source.csv +0 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-55b74e48-50e3-4bf3-8e02-f03e464c22ac1750632538084-2025_06_22-15.48.59.681/source.csv +0 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-67e49b73-4378-4d9b-aa07-eb22704d83ae1750992411736-2025_06_26-19.46.53.239/source.csv +241 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-7f860396-c5c8-4f1f-8ce7-04e005748e611754402256906-2025_08_05-15.57.44.850/source.csv +0 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-84bc9952-c4b0-4456-bdc2-984faf53684f1751163593750-2025_06_28-19.19.55.196/source.csv +9 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-895267d6-5fbc-45e8-bc56-0d7c756881181750708632303-2025_06_23-12.57.13.921/source.csv +0 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-8e0958c9-e396-41d9-b3d4-8a748cefa1701750701699946-2025_06_23-11.01.41.744/source.csv +0 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-978ed4eb-d9c9-4380-b981-e501087459181750623968304-2025_06_22-13.26.11.394/source.csv +7 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-b00cd52f-686b-4cad-89ec-cf5dcdc287a11753702370531-2025_07_28-13.32.59.505/source.csv +0 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-b08d92a3-9c0a-4526-b12f-c973e9c3c43f1752071802867-2025_07_09-16.36.43.962/source.csv +267 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-b3271939-bd4f-497b-b876-5ea890ece75f1750632226677-2025_06_22-15.43.48.822/source.csv +0 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-d413e23e-d7da-4c64-9e15-0b0c0e6031031751383188198-2025_07_01-17.19.54.522/source.csv +5 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-d9cdf338-0ddd-4679-853a-6d7bdf2b18581751046137722-2025_06_27-10.42.19.354/source.csv +167 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-f0382786-979c-4a6d-8e9b-f5977f18eb4f1753726151187-2025_07_28-20.09.13.67/source.csv +0 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-f818bac9-3228-48bb-85cd-ad930fdb35d91752220838711-2025_07_11-10.00.40.248/source.csv +0 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-faba6583-b2c9-4b94-9ba6-9f240428520a1750722089894-2025_06_23-22.50.32.930/source.csv +182 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-faba6583-b2c9-4b94-9ba6-9f240428520a1750722089894-2025_06_23-23.44.53.64/source.csv +14 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-faba6583-b2c9-4b94-9ba6-9f240428520a1750722089894-2025_06_23-23.49.28.299/source.csv +151 -0
- 4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-fcaaf520-6a1e-40c5-9a72-85ae7ad4ab0b1750621325310-2025_06_22-12.42.08.659/source.csv +5 -0
- 927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-02cb4c77-70ba-4c2a-bfdb-bd7c7d66767f1752013690963-2025_07_09-00.29.05.866/source.csv +0 -0
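Each path above follows the pattern `<contributor-hash>/crowd-code-<session-uuid><unix-ms>-<date>-<time>/source.csv`, where each `source.csv` holds one recorded editing session. Below is a minimal sketch of how one might enumerate these sessions in a local clone; the `DATASET_ROOT` path and the `iter_sessions` helper are hypothetical illustrations, not part of the dataset tooling.

```python
from pathlib import Path

import pandas as pd

# Hypothetical location of a local clone of this dataset.
DATASET_ROOT = Path("./crowd-code")

def iter_sessions(root: Path):
    """Yield (contributor_hash, session_name, events) for every recording."""
    for csv_path in sorted(root.glob("*/crowd-code-*/source.csv")):
        # Columns: Sequence, Time, File, RangeOffset, RangeLength, Text, Language, Type
        events = pd.read_csv(csv_path)
        yield csv_path.parts[-3], csv_path.parent.name, events

for contributor, session, events in iter_sessions(DATASET_ROOT):
    print(contributor[:8], session, len(events), "events")
```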
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-995920d4-3066-4bd1-985c-53b12cb9e83c1753010233944-2025_07_20-13.18.05.231/source.csv
ADDED
The diff for this file is too large to render.
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-036b3473-9f9d-44a0-a906-2567459f706c1754119162409-2025_08_02-09.19.29.910/source.csv
ADDED
@@ -0,0 +1,119 @@
+Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
+1,3,"experiments/sample.sh",0,0,"source .venv/bin/activate\n\ndata_dir=""$PWD/data_arrayrecord/dummy""\nckpt_dir=""$PWD/checkpoints/causal_dynamics_openai_grain_tok_restore""\n\nexport PYTHONUNBUFFERED=1\nsrun ipython --pdb sample.py -- \\n --dyna_type ""causal"" \\n --batch_size 1 \\n --seq_len 3 \\n --start_frame 1 \\n --checkpoint $ckpt_dir \\n --data_dir $data_dir",shellscript,tab
+2,310,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"9:19:29 AM [info] Activating crowd-code\n9:19:29 AM [info] Recording started\n9:19:29 AM [info] Initializing git provider using file system watchers...\n",Log,tab
+3,337,"experiments/sample.sh",0,0,"",shellscript,tab
+
4,3606,"genie.py",0,0,"from typing import Dict\nimport time\n\nimport optax\nimport jax\nimport jax.numpy as jnp\nimport flax.nnx as nnx\nimport orbax.checkpoint as ocp\n\nfrom models.dynamics import DynamicsMaskGIT, DynamicsCausal\nfrom models.lam import LatentActionModel\nfrom models.tokenizer import TokenizerVQVAE\n\n\nclass Genie(nnx.Module):\n """"""Genie model""""""\n\n def __init__(\n self,\n in_dim: int,\n tokenizer_dim: int,\n tokenizer_ffn_dim: int,\n latent_patch_dim: int,\n num_patch_latents: int,\n patch_size: int,\n tokenizer_num_blocks: int,\n tokenizer_num_heads: int,\n lam_dim: int,\n lam_ffn_dim: int,\n latent_action_dim: int,\n num_latent_actions: int,\n lam_patch_size: int,\n lam_num_blocks: int,\n lam_num_heads: int,\n lam_co_train: bool,\n dyna_type: str,\n dyna_dim: int,\n dyna_ffn_dim: int,\n dyna_num_blocks: int,\n dyna_num_heads: int,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n dropout: float = 0.0,\n mask_limit: float = 0.0,\n ):\n # --- Tokenizer ---\n self.in_dim = in_dim\n self.tokenizer_dim = tokenizer_dim\n self.tokenizer_ffn_dim = tokenizer_ffn_dim\n self.latent_patch_dim = latent_patch_dim\n self.num_patch_latents = num_patch_latents\n self.patch_size = patch_size\n self.tokenizer_num_blocks = tokenizer_num_blocks\n self.tokenizer_num_heads = tokenizer_num_heads\n # --- LAM ---\n self.lam_dim = lam_dim\n self.lam_ffn_dim = lam_ffn_dim\n self.latent_action_dim = latent_action_dim\n self.num_latent_actions = num_latent_actions\n self.lam_patch_size = lam_patch_size\n self.lam_num_blocks = lam_num_blocks\n self.lam_num_heads = lam_num_heads\n self.lam_co_train = lam_co_train\n # --- Dynamics ---\n self.dyna_type = dyna_type\n self.dyna_dim = dyna_dim\n self.dyna_ffn_dim = dyna_ffn_dim\n self.dyna_num_blocks = dyna_num_blocks\n self.dyna_num_heads = dyna_num_heads\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.dropout = dropout\n self.mask_limit = mask_limit\n\n self.tokenizer = TokenizerVQVAE(\n in_dim=self.in_dim,\n model_dim=self.tokenizer_dim,\n ffn_dim=self.tokenizer_ffn_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_patch_latents,\n patch_size=self.patch_size,\n num_blocks=self.tokenizer_num_blocks,\n num_heads=self.tokenizer_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n self.lam = LatentActionModel(\n in_dim=self.in_dim,\n model_dim=self.lam_dim,\n ffn_dim=self.lam_ffn_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_latent_actions,\n patch_size=self.lam_patch_size,\n num_blocks=self.lam_num_blocks,\n num_heads=self.lam_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n if self.dyna_type == ""maskgit"":\n self.dynamics = DynamicsMaskGIT(\n model_dim=self.dyna_dim,\n ffn_dim=self.dyna_ffn_dim,\n num_latents=self.num_patch_latents,\n latent_action_dim=self.latent_action_dim,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n mask_limit=self.mask_limit,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n elif self.dyna_type == ""causal"":\n self.dynamics = DynamicsCausal(\n model_dim=self.dyna_dim,\n ffn_dim=self.dyna_ffn_dim,\n 
num_latents=self.num_patch_latents,\n latent_action_dim=self.latent_action_dim,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n decode=decode,\n rngs=rngs,\n )\n else:\n raise ValueError(f""Invalid dynamics type: {self.dyna_type}"")\n\n def __call__(\n self, batch: Dict[str, jax.Array], training: bool = True\n ) -> Dict[str, jax.Array]:\n videos_BTHWC = batch[""videos""]\n tokenizer_outputs = self.tokenizer.vq_encode(videos_BTHWC, training=False)\n token_indices_BTN = tokenizer_outputs[""indices""]\n lam_outputs = self.lam.vq_encode(videos_BTHWC, training=False)\n z_q_BTm11L = lam_outputs[""z_q""]\n action_indices_E = lam_outputs[""indices""]\n latent_actions_BTm11L = jax.lax.cond(\n self.lam_co_train,\n lambda: z_q_BTm11L,\n lambda: jax.lax.stop_gradient(z_q_BTm11L),\n )\n outputs = dict(\n video_tokens=jax.lax.stop_gradient(token_indices_BTN),\n latent_actions=latent_actions_BTm11L,\n )\n outputs[""mask_rng""] = batch[""mask_rng""]\n dyna_logits_BTNV, dyna_mask = self.dynamics(outputs, training)\n outputs[""token_logits""] = dyna_logits_BTNV\n if dyna_mask is not None:\n outputs[""mask""] = dyna_mask\n mle_indices_BTN = jnp.argmax(outputs[""token_logits""], axis=-1)\n H, W = batch[""videos""].shape[2:4]\n outputs[""recon""] = self.tokenizer.decode(mle_indices_BTN, (H, W))\n outputs[""lam_indices""] = action_indices_E\n return outputs\n\n # FIXME (f.srambical): sampling should be moved to the dynamics classes\n def sample(\n self,\n batch: Dict[str, jax.Array],\n seq_len: int,\n steps: int = 25,\n temperature: float = 1,\n sample_argmax: bool = False,\n ) -> jax.Array:\n """"""\n Autoregressively samples up to `seq_len` future frames, following Figure 8 of the paper.\n\n - Input frames are tokenized once.\n - Future frames are generated autoregressively in token space.\n - All frames are detokenized in a single pass.\n\n Note:\n - For interactive or step-wise sampling, detokenization should occur after each action.\n - To maintain consistent tensor shapes across timesteps, all current and future frames are decoded at every step.\n - Temporal causal structure is preserved by\n a) reapplying the mask before each decoding step.\n b) a temporal causal mask is applied within each ST-transformer block.\n\n Dimension keys:\n B: batch size\n T: number of input (conditioning) frames\n N: number of patches per frame\n M: model dimension\n S: sequence length\n H: height\n W: width\n E: B * (S - 1)\n """"""\n # --- Encode videos and actions ---\n videos_BTHWC = batch[""videos""]\n latent_actions_E = batch[""latent_actions""]\n tokenizer_out = self.tokenizer.vq_encode(videos_BTHWC, training=False)\n token_idxs_BTN = tokenizer_out[""indices""]\n B, T, N = token_idxs_BTN.shape\n pad_shape = (B, seq_len - T, N)\n pad = jnp.zeros(pad_shape, dtype=token_idxs_BTN.dtype)\n token_idxs_BSN = jnp.concatenate([token_idxs_BTN, pad], axis=1)\n action_tokens_EL = self.lam.vq.get_codes(latent_actions_E)\n\n def maskgit_step_fn(\n carry: tuple[jax.Array, jax.Array, jax.Array, jax.Array], step: jax.Array\n ) -> tuple[tuple[jax.Array, jax.Array, jax.Array, jax.Array], None]:\n rng, token_idxs_BSN, mask_BSN, action_tokens_EL = carry\n S, N = token_idxs_BSN.shape[1:]\n L = action_tokens_EL.shape[-1]\n\n # --- Construct + encode video ---\n vid_embed_BSNM = self.dynamics.patch_embed(token_idxs_BSN)\n mask_token_111M = self.dynamics.mask_token.value\n mask_expanded_BSN1 = 
mask_BSN[..., None]\n vid_embed_BSNM = jnp.where(mask_expanded_BSN1, mask_token_111M, vid_embed_BSNM)\n\n # --- Predict transition ---\n action_tokens_BSm1L = jnp.reshape(action_tokens_EL, (B, S - 1, L))\n act_embed_BSm1M = self.dynamics.action_up(action_tokens_BSm1L)\n act_embed_BSM = jnp.pad(act_embed_BSm1M, ((0, 0), (1, 0), (0, 0)))\n act_embed_BS1M = jnp.reshape(act_embed_BSM, (B, S, 1, act_embed_BSM.shape[-1]))\n vid_embed_BSNM += act_embed_BS1M\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (steps * 2))\n step_temp = temperature * (1.0 - unmasked_ratio)\n final_logits_BSNV = self.dynamics.transformer(vid_embed_BSNM) / step_temp\n\n # --- Sample new tokens for final frame ---\n if sample_argmax:\n sampled_token_idxs_BSN = jnp.argmax(final_logits_BSNV, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs_BSN = jax.random.categorical(_rng, final_logits_BSNV)\n gather_fn = jax.vmap(jax.vmap(jax.vmap(lambda x, y: x[y])))\n final_token_probs_BSN = gather_fn(\n jax.nn.softmax(final_logits_BSNV), sampled_token_idxs_BSN\n )\n final_token_probs_BSN += ~mask_BSN\n # Update masked tokens only\n token_idxs_BSN = jnp.where(mask_BSN, sampled_token_idxs_BSN, token_idxs_BSN)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask_N = jnp.arange(final_token_probs_BSN.shape[-1]) > num_unmasked_tokens\n sorted_idxs_BSN = jnp.argsort(final_token_probs_BSN, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask_N))\n new_mask_BSN = mask_update_fn(mask_BSN, sorted_idxs_BSN)\n\n new_carry = (rng, token_idxs_BSN, new_mask_BSN, action_tokens_EL)\n return new_carry, None\n\n def generation_step_fn(\n carry: tuple[jax.Array, jax.Array], step_t: jax.Array\n ) -> tuple[tuple[jax.Array, jax.Array], None]:\n rng, current_token_idxs_BSN = carry\n rng, step_rng = jax.random.split(rng)\n\n # Mask current and future frames (i.e., t >= step_t)\n mask_S = jnp.arange(seq_len) >= step_t\n mask_BSN = jnp.broadcast_to(mask_S[None, :, None], (B, seq_len, N)).astype(\n bool\n )\n masked_token_idxs_BSN = current_token_idxs_BSN * ~mask_BSN\n\n # --- Initialize and run MaskGIT loop ---\n init_carry_maskgit = (\n step_rng,\n masked_token_idxs_BSN,\n mask_BSN,\n action_tokens_EL,\n )\n final_carry_maskgit, _ = jax.lax.scan(\n maskgit_step_fn, init_carry_maskgit, jnp.arange(steps)\n )\n updated_token_idxs_BSN = final_carry_maskgit[1]\n new_carry = (rng, updated_token_idxs_BSN)\n return new_carry, None\n\n # --- Run the autoregressive generation using jax.lax.scan ---\n initial_carry = (batch[""rng""], token_idxs_BSN)\n timesteps_to_scan = jnp.arange(T, seq_len)\n final_carry, _ = jax.lax.scan(\n generation_step_fn, initial_carry, timesteps_to_scan\n )\n final_token_idxs_BSN = final_carry[1]\n\n # --- Decode all tokens at once at the end ---\n H, W = batch[""videos""].shape[2:4]\n final_frames_BSHWC = self.tokenizer.decode(\n final_token_idxs_BSN,\n video_hw=(H, W),\n )\n return final_frames_BSHWC\n\n def sample_causal(\n self,\n batch: Dict[str, jax.Array],\n seq_len: int,\n temperature: float = 1,\n sample_argmax: bool = False,\n ) -> jax.Array:\n """"""\n Autoregressively samples up to `seq_len` future frames, following Figure 8 of the paper.\n\n - Input frames are tokenized once.\n - Future frames are generated autoregressively in token space.\n - All frames are detokenized in a single pass.\n\n Note:\n - For interactive or step-wise sampling, detokenization should occur after each action.\n - To maintain 
consistent tensor shapes across timesteps, all current and future frames are decoded at every step.\n - Temporal causal structure is preserved by\n a) reapplying the mask before each decoding step.\n b) a temporal causal mask is applied within each ST-transformer block.\n\n Dimension keys:\n B: batch size\n T: number of input (conditioning) frames\n N: number of patches per frame\n M: model dimension\n S: sequence length\n H: height\n W: width\n E: B * (S - 1)\n """"""\n # FIXME (f.srambical): reset spatial kv cache after each frame\n assert isinstance(self.dynamics, DynamicsCausal)\n # --- Encode videos and actions ---\n videos_BTHWC = batch[""videos""]\n latent_actions_E = batch[""latent_actions""]\n tokenizer_out = self.tokenizer.vq_encode(videos_BTHWC, training=False)\n token_idxs_BTN = tokenizer_out[""indices""]\n B, T, N = token_idxs_BTN.shape\n pad_shape = (B, seq_len - T, N)\n pad = jnp.zeros(pad_shape, dtype=token_idxs_BTN.dtype)\n token_idxs_BSN = jnp.concatenate([token_idxs_BTN, pad], axis=1)\n action_tokens_EL = self.lam.vq.get_codes(latent_actions_E)\n dynamics_causal: DynamicsCausal = self.dynamics\n\n for block in dynamics_causal.transformer.blocks:\n block.spatial_attention.init_cache((B * seq_len, (N + 1), self.dyna_dim), dtype=self.dtype)\n block.temporal_attention.init_cache((B * (N + 1), seq_len, self.dyna_dim), dtype=self.dtype)\n\n @nnx.jit\n def causal_step_fn(\n carry: tuple[jax.Array, jax.Array, jax.Array, jax.Array], step_n: jax.Array\n ) -> tuple[tuple[jax.Array, jax.Array, jax.Array, jax.Array], None]:\n rng, token_idxs_BSN, action_tokens_EL, step_t = carry\n S, N = token_idxs_BSN.shape[1:]\n L = action_tokens_EL.shape[-1]\n\n # --- Construct + encode video ---\n vid_embed_BSNM = dynamics_causal.patch_embed(token_idxs_BSN)\n\n # --- Predict transition ---\n action_tokens_BSm1L = jnp.reshape(action_tokens_EL, (B, S - 1, L))\n act_embed_BSm1M = dynamics_causal.action_up(action_tokens_BSm1L)\n act_embed_BSM = jnp.pad(act_embed_BSm1M, ((0, 0), (1, 0), (0, 0)))\n act_embed_BS1M = jnp.reshape(act_embed_BSM, (B, S, 1, act_embed_BSM.shape[-1]))\n vid_embed_BSNp1M = jnp.concatenate([act_embed_BS1M, vid_embed_BSNM], axis=2)\n final_logits_BTNp1V = dynamics_causal.transformer(vid_embed_BSNp1M, (step_t, step_n)) / temperature\n final_logits_BV = final_logits_BTNp1V[:, step_t, step_n, :]\n\n # --- Sample new tokens for final frame ---\n if sample_argmax:\n sampled_token_idxs_B = jnp.argmax(final_logits_BV, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs_B = jax.random.categorical(_rng, final_logits_BV)\n # Update next tokens only\n token_idxs_BSN = token_idxs_BSN.at[:, step_t, step_n].set(sampled_token_idxs_B)\n step_t += 1\n\n new_carry = (rng, token_idxs_BSN, action_tokens_EL, step_t)\n return new_carry, None\n\n # --- Run the autoregressive generation using a for loop ---\n rng = batch[""rng""]\n current_token_idxs_BSN = token_idxs_BSN\n \n for step_t in range(T, seq_len):\n rng, step_rng = jax.random.split(rng)\n\n # --- Reset spatial KV caches before each frame ---\n # for block in dynamics_causal.transformer.blocks:\n # block.spatial_attention.init_cache((B * seq_len, (N + 1), self.dyna_dim), dtype=self.dtype)\n #breakpoint()\n\n # --- Initialize and run causal loop ---\n init_carry_causal = (\n step_rng,\n current_token_idxs_BSN,\n action_tokens_EL,\n jnp.array(step_t, dtype=jnp.int32),\n )\n\n # current_token_idxs_BSN.block_until_ready()\n # start = time.time()\n final_carry_causal, _ = jax.lax.scan(\n causal_step_fn, init_carry_causal, 
jnp.arange(N)\n )\n # final_carry_causal[1].block_until_ready()\n # elapsed = time.time() - start\n # print(f""Autoregressive generation time: {elapsed:.4f}s"")\n # breakpoint()\n current_token_idxs_BSN = final_carry_causal[1]\n \n final_token_idxs_BSN = current_token_idxs_BSN\n\n # --- Decode all tokens at once at the end ---\n H, W = batch[""videos""].shape[2:4]\n final_frames_BSHWC = self.tokenizer.decode(\n final_token_idxs_BSN,\n video_hw=(H, W),\n )\n return final_frames_BSHWC\n\n def vq_encode(self, batch: Dict[str, jax.Array], training: bool) -> jax.Array:\n # --- Preprocess videos ---\n video_BTHWC = batch[""videos""]\n lam_output = self.lam.vq_encode(video_BTHWC, training=training)\n lam_indices_E = lam_output[""indices""]\n return lam_indices_E\n\n# FIXME (f.srambical): add conversion script for old checkpoints\ndef restore_genie_components(\n optimizer: nnx.Optimizer,\n sharding: jax.sharding.NamedSharding,\n rng: jax.Array,\n args,\n) -> nnx.Optimizer:\n """"""Restore pre-trained Genie components""""""\n rngs = nnx.Rngs(rng)\n\n tx = optimizer.tx\n model = optimizer.model\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n step_format_fixed_length=6,\n )\n tokenizer_checkpoint_manager = ocp.CheckpointManager(\n directory=args.tokenizer_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.tokenizer_dim,\n ffn_dim=args.tokenizer_ffn_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n num_blocks=args.tokenizer_num_blocks,\n num_heads=args.tokenizer_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs,\n )\n dummy_tokenizer_optimizer = nnx.Optimizer(dummy_tokenizer, tx)\n dummy_tokenizer_optimizer_state = nnx.state(dummy_tokenizer_optimizer)\n abstract_sharded_tokenizer_optimizer_state = _create_abstract_sharded_pytree(\n dummy_tokenizer_optimizer_state, sharding\n )\n restored_tokenizer = tokenizer_checkpoint_manager.restore(\n step=tokenizer_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore( # type: ignore\n abstract_sharded_tokenizer_optimizer_state # type: ignore\n ),\n ),\n )[""model_state""]\n nnx.update(dummy_tokenizer_optimizer.model, restored_tokenizer.model)\n model.tokenizer = dummy_tokenizer_optimizer.model\n tokenizer_checkpoint_manager.close()\n\n if args.lam_checkpoint:\n lam_checkpoint_manager = ocp.CheckpointManager(\n directory=args.lam_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.lam_dim,\n ffn_dim=args.lam_ffn_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_latent_actions,\n patch_size=args.lam_patch_size,\n num_blocks=args.lam_num_blocks,\n num_heads=args.lam_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs,\n )\n dummy_lam_optimizer = nnx.Optimizer(dummy_lam, tx)\n dummy_lam_optimizer_state = nnx.state(dummy_lam_optimizer)\n abstract_sharded_lam_optimizer_state = 
_create_abstract_sharded_pytree(\n dummy_lam_optimizer_state, sharding\n )\n restored_lam_optimizer = lam_checkpoint_manager.restore(\n step=lam_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore( # type: ignore\n abstract_sharded_lam_optimizer_state # type: ignore\n ),\n ),\n )[""model_state""]\n nnx.update(dummy_lam_optimizer.model, restored_lam_optimizer.model)\n model.lam = dummy_lam_optimizer.model\n # Remove the LAM decoder to save memory and avoid unnecessary computation.\n del model.lam.decoder\n lam_checkpoint_manager.close()\n \n # Reinitialize the optimizer states\n optimizer = nnx.Optimizer(model, tx)\n return optimizer\n\n\ndef _create_abstract_sharded_pytree(\n pytree_template: nnx.GraphState, sharding_spec: jax.sharding.NamedSharding\n) -> jax.Array:\n """"""Replaces arrays in a pytree with ShapeDtypeStructs having the given sharding.""""""\n\n def map_fn(leaf_template):\n if hasattr(leaf_template, ""shape"") and hasattr(leaf_template, ""dtype""):\n return jax.ShapeDtypeStruct(\n leaf_template.shape, leaf_template.dtype, sharding=sharding_spec\n )\n return leaf_template\n\n return jax.tree_util.tree_map(map_fn, pytree_template)\n",python,tab
+5,7015,"experiments/sample.sh",0,0,"",shellscript,tab
+6,38268,"genie.py",0,0,"",python,tab
+7,105465,"genie.py",16512,1," ",python,selection_command
+8,105575,"genie.py",16511,2,"# ",python,selection_command
+9,105751,"genie.py",16511,2,"# ",python,selection_command
+10,105915,"genie.py",16574,2,"",python,content
+11,105915,"genie.py",16511,2,"",python,content
+12,105926,"genie.py",16511,0,"",python,selection_command
+13,292966,"genie.py",16514,0,"",python,selection_mouse
+14,304442,"genie.py",14498,0,"",python,selection_command
+
15,307625,"sample.py",0,0,"from dataclasses import dataclass\nimport time\nimport os\nimport optax\nimport math\n\nimport dm_pix as pix\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport numpy as np\nimport orbax.checkpoint as ocp\nfrom PIL import Image, ImageDraw\nimport tyro\nfrom flax import nnx\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader\n\n\n@dataclass\nclass Args:\n # Experiment\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data/coinrun_episodes""\n checkpoint: str = """"\n # Sampling\n batch_size: int = 1\n maskgit_steps: int = 25\n temperature: float = 1.0\n sample_argmax: bool = True\n start_frame: int = 0\n # Tokenizer checkpoint\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n # LAM checkpoint\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n # Dynamics checkpoint\n dyna_type: str = ""maskgit""\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n\n\nargs = tyro.cli(Args)\n\nif __name__ == ""__main__"":\n """"""\n Dimension keys:\n B: batch size\n T: number of input (conditioning) frames\n N: number of patches per frame\n S: sequence length\n H: height\n W: width\n E: B * (S - 1)\n """"""\n # jax.distributed.initialize()\n\n rng = jax.random.key(args.seed)\n\n # --- Load Genie checkpoint ---\n rngs = nnx.Rngs(rng)\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_type=args.dyna_type,\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n decode=True,\n rngs=rngs,\n )\n\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n checkpoint_options = ocp.CheckpointManagerOptions(\n step_format_fixed_length=6,\n )\n checkpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n dummy_tx = optax.adamw(\n learning_rate=optax.linear_schedule(0.0001, 0.0001, 10000),\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n dummy_optimizer = nnx.Optimizer(genie, dummy_tx)\n\n abstract_optimizer = nnx.eval_shape(lambda: 
dummy_optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n ),\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(dummy_optimizer, restored_optimizer_state)\n\n # --- Define sampling function ---\n def _sampling_fn(model: Genie, batch: dict) -> jax.Array:\n """"""Runs Genie.sample with pre-defined generation hyper-parameters.""""""\n if args.dyna_type == ""maskgit"":\n return model.sample(\n batch,\n args.seq_len,\n args.maskgit_steps,\n args.temperature,\n args.sample_argmax,\n )\n elif args.dyna_type == ""causal"":\n return model.sample_causal(\n batch,\n args.seq_len,\n args.temperature,\n args.sample_argmax,\n )\n else:\n raise ValueError(f""Invalid dynamics type: {args.dyna_type}"")\n\n # --- Define autoregressive sampling loop ---\n # FIXME (f.srambical): why is kv caching not working with nnx.jit?\n #@nnx.jit\n def _autoreg_sample(genie, rng, video_batch_BSHWC, action_batch_E):\n input_video_BTHWC = video_batch_BSHWC[:, : args.start_frame]\n rng, _rng = jax.random.split(rng)\n batch = dict(videos=input_video_BTHWC, latent_actions=action_batch_E, rng=_rng)\n generated_vid_BSHWC = _sampling_fn(genie, batch)\n return generated_vid_BSHWC\n\n # --- Get video + latent actions ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n args.batch_size,\n args.image_height,\n args.image_width,\n args.image_channels,\n # We don't use workers in order to avoid grain shutdown issues (https://github.com/google/grain/issues/398)\n num_workers=0,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n dataloader = iter(dataloader)\n video_batch_BSHWC = next(dataloader)\n gt_video = jnp.asarray(video_batch_BSHWC, dtype=jnp.float32) / 255.0\n video_batch_BSHWC = gt_video.astype(args.dtype)\n # Get latent actions for all videos in the batch\n batch = dict(videos=video_batch_BSHWC)\n action_batch_E = genie.vq_encode(batch, training=False)\n\n # --- Sample + evaluate video ---\n # B, S, H, W, _ = video_batch_BSHWC.shape\n # N = math.ceil(H / args.patch_size) * math.ceil(W / args.patch_size)\n # for block in genie.dynamics.transformer.blocks:\n # block.spatial_attention.init_cache((B * S, 1, args.dyna_dim), dtype=args.dtype)\n # block.temporal_attention.init_cache((B * (N + 1), 1, args.dyna_dim), dtype=args.dtype)\n\n recon_video_BSHWC = _autoreg_sample(genie, rng, video_batch_BSHWC, action_batch_E)\n recon_video_BSHWC = recon_video_BSHWC.astype(jnp.float32)\n gt = gt_video[:, : recon_video_BSHWC.shape[1]].clip(0, 1).reshape(-1, *gt_video.shape[2:])\n recon = recon_video_BSHWC.clip(0, 1).reshape(-1, *recon_video_BSHWC.shape[2:])\n ssim = jnp.asarray(\n pix.ssim(gt[:, args.start_frame + 1 :], recon[:, args.start_frame + 1 :])\n ).mean()\n print(f""SSIM: {ssim}"")\n\n # --- Construct video ---\n true_videos = (gt_video * 255).astype(np.uint8)\n pred_videos = (recon_video_BSHWC * 255).astype(np.uint8)\n video_comparison = np.zeros((2, *recon_video_BSHWC.shape), dtype=np.uint8)\n video_comparison[0] = true_videos[:, : args.seq_len]\n video_comparison[1] = pred_videos\n frames = einops.rearrange(video_comparison, ""n b t h w c -> t (b h) (n w) c"")\n\n # --- Save video ---\n imgs = [Image.fromarray(img) for img in frames]\n # Write actions on each 
frame, on each row (i.e., for each video in the batch, on the GT row)\n B, S, _, _, _ = video_batch_BSHWC.shape\n action_batch_BSm11 = jnp.reshape(action_batch_E, (B, S-1, 1))\n for t, img in enumerate(imgs[1:]):\n d = ImageDraw.Draw(img)\n for row in range(action_batch_BSm11.shape[0]):\n action = action_batch_BSm11[row, t, 0]\n y_offset = row * video_batch_BSHWC.shape[2] + 2\n d.text((2, y_offset), f""{action}"", fill=255)\n imgs[0].save(\n f""generation_{time.time()}.gif"",\n save_all=True,\n append_images=imgs[1:],\n duration=250,\n loop=0,\n )\n",python,tab
+16,307640,"sample.py",34,0,"",python,selection_command
+17,307825,"sample.py",34,0,"t",python,content
+18,307826,"sample.py",35,0,"",python,selection_keyboard
+19,308512,"sample.py",34,1,"",python,content
+20,311448,"sample.py",5133,0,"",python,selection_command
+21,311804,"sample.py",5148,0,"",python,selection_command
+22,311986,"sample.py",5133,0,"",python,selection_command
+23,312412,"sample.py",5148,0,"",python,selection_command
+24,313409,"genie.py",0,0,"",python,tab
+25,319228,"genie.py",14493,0,"",python,selection_command
+26,319668,"genie.py",14493,0,"#",python,content
+27,319668,"genie.py",14494,0,"",python,selection_keyboard
+28,320188,"genie.py",14493,0,"",python,selection_command
+29,377277,"genie.py",14493,1,"",python,content
+30,436763,"TERMINAL",0,0,"",,terminal_focus
+31,438020,"TERMINAL",0,0,"source /home/franz.srambical/jafar/.venv/bin/activate",,terminal_command
+32,438021,"TERMINAL",0,0,"]633;C]0;franz.srambical@hai-login1:~/jafar",,terminal_output
+33,440321,"TERMINAL",0,0,"bash",,terminal_focus
+34,557954,"genie.py",0,0,"",python,tab
+35,558811,"genie.py",14510,0,"",python,selection_command
+36,558914,"genie.py",14538,0,"",python,selection_command
+37,558943,"genie.py",14626,0,"",python,selection_command
+38,558975,"genie.py",14703,0,"",python,selection_command
+39,558975,"genie.py",14769,0,"",python,selection_command
+40,558975,"genie.py",14813,0,"",python,selection_command
+41,558975,"genie.py",14848,0,"",python,selection_command
+42,558975,"genie.py",14857,0,"",python,selection_command
+43,559208,"genie.py",14904,0,"",python,selection_command
+44,559208,"genie.py",14969,0,"",python,selection_command
+45,559208,"genie.py",14978,0,"",python,selection_command
+46,559208,"genie.py",15019,0,"",python,selection_command
+47,559208,"genie.py",15098,0,"",python,selection_command
+48,559208,"genie.py",15175,0,"",python,selection_command
+49,559279,"genie.py",15254,0,"",python,selection_command
+50,559280,"genie.py",15346,0,"",python,selection_command
+51,559297,"genie.py",15435,0,"",python,selection_command
+52,559333,"genie.py",15547,0,"",python,selection_command
+53,559337,"genie.py",15611,0,"",python,selection_command
+54,559367,"genie.py",15620,0,"",python,selection_command
+55,559549,"genie.py",15676,0,"",python,selection_command
+56,559597,"genie.py",15706,0,"",python,selection_command
+57,559598,"genie.py",15782,0,"",python,selection_command
+58,559598,"genie.py",15800,0,"",python,selection_command
+59,559598,"genie.py",15850,0,"",python,selection_command
+60,559642,"genie.py",15935,0,"",python,selection_command
+61,559643,"genie.py",15973,0,"",python,selection_command
+62,559652,"genie.py",16065,0,"",python,selection_command
+63,559673,"genie.py",16081,0,"",python,selection_command
+64,559685,"genie.py",16090,0,"",python,selection_command
+65,559713,"genie.py",16162,0,"",python,selection_command
+66,559714,"genie.py",16189,0,"",python,selection_command
+67,559735,"genie.py",16198,0,"",python,selection_command
+68,559839,"genie.py",16267,0,"",python,selection_command
+69,559878,"genie.py",16294,0,"",python,selection_command
+70,559917,"genie.py",16341,0,"",python,selection_command
+71,559993,"genie.py",16351,0,"",python,selection_command
+72,560246,"genie.py",16392,0,"",python,selection_command
+73,560247,"genie.py",16434,0,"",python,selection_command
+74,560247,"genie.py",16443,0,"",python,selection_command
+75,560247,"genie.py",16507,0,"",python,selection_command
+76,560271,"genie.py",16568,0,"",python,selection_command
+77,560279,"genie.py",16676,0,"",python,selection_command
+78,560881,"genie.py",16694,0,"",python,selection_command
+79,560967,"genie.py",16703,0,"",python,selection_command
+80,561002,"genie.py",16756,0,"",python,selection_command
+81,561017,"genie.py",16790,0,"",python,selection_command
+82,561038,"genie.py",16816,0,"",python,selection_command
+83,561096,"genie.py",16856,0,"",python,selection_command
+84,561475,"genie.py",16890,0,"",python,selection_command
+85,561510,"genie.py",16942,0,"",python,selection_command
+86,561536,"genie.py",16890,0,"",python,selection_command
+87,562376,"genie.py",16856,0,"",python,selection_command
+88,562840,"genie.py",16816,0,"",python,selection_command
+89,562857,"genie.py",16856,0,"",python,selection_command
+90,563111,"genie.py",16890,0,"",python,selection_command
+91,563128,"genie.py",16942,0,"",python,selection_command
+92,563206,"genie.py",16948,0,"",python,selection_command
+93,563208,"genie.py",16957,0,"",python,selection_command
+94,563239,"genie.py",17014,0,"",python,selection_command
+95,563254,"genie.py",17048,0,"",python,selection_command
+96,563255,"genie.py",17098,0,"",python,selection_command
+97,563288,"genie.py",17163,0,"",python,selection_command
+98,563409,"genie.py",17177,0,"",python,selection_command
+99,563440,"genie.py",17163,0,"",python,selection_command
+100,563576,"genie.py",17098,0,"",python,selection_command
+101,563588,"genie.py",17048,0,"",python,selection_command
+102,563920,"genie.py",17014,0,"",python,selection_command
+103,563988,"genie.py",16957,0,"",python,selection_command
+104,564001,"genie.py",16948,0,"",python,selection_command
+105,564006,"genie.py",16942,0,"",python,selection_command
+106,564028,"genie.py",16890,0,"",python,selection_command
+107,564032,"genie.py",16856,0,"",python,selection_command
+108,564319,"genie.py",16816,0,"",python,selection_command
+109,564319,"genie.py",16790,0,"",python,selection_command
+110,564333,"genie.py",16756,0,"",python,selection_command
+111,564357,"genie.py",16703,0,"",python,selection_command
+112,564357,"genie.py",16694,0,"",python,selection_command
+113,564518,"genie.py",16676,0,"",python,selection_command
+114,564650,"genie.py",16568,0,"",python,selection_command
+115,565429,"genie.py",16507,0,"",python,selection_command
+116,565576,"genie.py",16499,60,"    for block in dynamics_causal.transformer.blocks:",python,selection_command
+117,565668,"genie.py",16499,168,"    for block in dynamics_causal.transformer.blocks:\n        block.spatial_attention.init_cache((B * seq_len, (N + 1), self.dyna_dim), dtype=self.dtype)",python,selection_command
+118,620912,"genie.py",16568,0,"",python,selection_command
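The rows above illustrate the event semantics: `tab` events carry a full snapshot of the focused file in `Text`, `content` events are in-place edits replacing `RangeLength` characters at `RangeOffset`, and `selection_*`/`terminal_*` events record cursor and terminal activity without changing file buffers. Below is a sketch of replaying a session to reconstruct final file contents under those assumptions; the interpretation is inferred from this diff, not taken from official crowd-code documentation.

```python
import pandas as pd

def replay(events: pd.DataFrame) -> dict[str, str]:
    """Reconstruct per-file buffers by applying events in sequence order."""
    buffers: dict[str, str] = {}
    for row in events.sort_values("Sequence").itertuples(index=False):
        # Newlines are stored as literal "\n" in the CSV cells.
        text = "" if pd.isna(row.Text) else str(row.Text).replace("\\n", "\n")
        if row.Type == "tab" and text:
            # Opening a tab records a full snapshot of the file.
            buffers[row.File] = text
        elif row.Type == "content":
            # In-place edit: replace RangeLength chars starting at RangeOffset.
            old = buffers.get(row.File, "")
            start = int(row.RangeOffset)
            buffers[row.File] = old[:start] + text + old[start + int(row.RangeLength):]
        # selection_* and terminal_* events do not modify file buffers.
    return buffers
```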
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-0f6ed796-3fd2-4eb6-ad79-777c3b4353711750804480941-2025_06_25-00.35.32.827/source.csv
ADDED
@@ -0,0 +1,215 @@
+Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
+
1,4,"train_lam.py",0,0,"from dataclasses import dataclass\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom models.lam import LatentActionModel\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_resolution: int = 64\n data_dir: str = ""data_tfrecords/coinrun""\n checkpoint: str = """"\n # Optimization\n batch_size: int = 36\n vq_beta: float = 0.25\n min_lr: float = 3e-6\n max_lr: float = 3e-5\n warmup_steps: int = 5000\n vq_reset_thresh: int = 50\n # LAM\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 6\n patch_size: int = 16\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.0\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n\n\nargs = tyro.cli(Args)\n\n\ndef lam_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params, inputs, training=True, rngs={""dropout"": inputs[""rng""]}\n )\n gt_future_frames = inputs[""videos""][:, 1:]\n mse = jnp.square(gt_future_frames - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = gt_future_frames.clip(0, 1).reshape(-1, *gt_future_frames.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n count_fn = jax.vmap(lambda i: (outputs[""indices""] == i).sum())\n index_counts = count_fn(jnp.arange(args.num_latents))\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=(index_counts != 0).mean(),\n )\n return loss, (outputs[""recon""], index_counts, metrics)\n\n\[email protected]\ndef train_step(state, inputs, action_last_active):\n # --- Update model ---\n rng, inputs[""rng""] = jax.random.split(inputs[""rng""])\n grad_fn = jax.value_and_grad(lam_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, idx_counts, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n\n # --- Reset inactive latent actions ---\n codebook = state.params[""params""][""vq""][""codebook""]\n num_codes = len(codebook)\n active_codes = idx_counts != 0.0\n action_last_active = jnp.where(active_codes, 0, action_last_active + 1)\n p_code = active_codes / active_codes.sum()\n reset_idxs = jax.random.choice(rng, num_codes, shape=(num_codes,), p=p_code)\n do_reset = action_last_active >= args.vq_reset_thresh\n new_codebook = jnp.where(\n jnp.expand_dims(do_reset, -1), codebook[reset_idxs], codebook\n )\n state.params[""params""][""vq""][""codebook""] = new_codebook\n action_last_active = 
jnp.where(do_reset, 0, action_last_active)\n return state, loss, recon, action_last_active, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(entity=args.entity, project=args.project, group=""debug"", config=args)\n\n # --- Initialize model ---\n lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n # Track when each action was last sampled\n action_last_active = jnp.zeros(args.num_latents)\n image_shape = (args.image_resolution, args.image_resolution, args.image_channels)\n rng, _rng = jax.random.split(rng)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = lam.init(_rng, inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=lam.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n action_last_active = jax.device_put(action_last_active, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer_<timestamp>_<step>\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n seed=args.seed,\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng = jax.random.split(rng)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(videos=videos, rng=_rng)\n train_state, loss, recon, action_last_active, metrics = train_step(\n train_state, inputs, 
action_last_active\n )\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log and jax.process_index() == 0:\n if step % args.log_interval == 0:\n wandb.log({""loss"": loss, ""step"": step, **metrics})\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0][1:]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""lam_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab
+2,3270,"TERMINAL",0,0,"/usr/bin/python3 /ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt",,terminal_command
+3,3288,"TERMINAL",0,0,"]633;E;/usr/bin/python3 /ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt;7bb61bf0-4c9f-4cd1-bf9e-c4e0d865bd72]633;C",,terminal_output
+4,3315,"TERMINAL",0,0,"]0;franz.srambical@hpc-submit02:/ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash]633;D;0",,terminal_output
+5,45867,"train_lam.py",0,0,"",python,tab
+6,45956,"train_lam.py",6991,0,"",python,selection_command
+
7,48071,"train_dynamics.py",0,0,"from dataclasses import dataclass\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom genie import Genie, restore_genie_components\nfrom models.tokenizer import TokenizerVQVAE\nfrom models.lam import LatentActionModel\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_resolution: int = 64\n data_dir: str = ""data_tfrecords/coinrun""\n # Optimization\n batch_size: int = 36\n min_lr: float = 3e-6\n max_lr: float = 3e-5\n warmup_steps: int = 5000\n # Tokenizer\n tokenizer_dim: int = 512\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 8\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 8\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_dim: int = 512\n dyna_num_blocks: int = 12\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(params, state, inputs):\n """"""Compute masked dynamics loss""""""\n outputs = state.apply_fn(\n params, inputs, training=True, rngs={""dropout"": inputs[""dropout_rng""]}\n )\n mask = outputs[""mask""]\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\[email protected]\ndef train_step(state, inputs):\n """"""Update state and compute metrics""""""\n grad_fn = jax.value_and_grad(dynamics_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n 
per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(entity=args.entity, project=args.project, group=""debug"", config=args)\n\n # --- Initialize model ---\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_resolution, args.image_resolution, args.image_channels)\n dummy_inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n action=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len), dtype=jnp.float32\n ),\n mask_rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = genie.init(_rng, dummy_inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=genie.apply, params=init_params, tx=tx)\n\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Restore checkpoint ---\n train_state = restore_genie_components(\n train_state, replicated_sharding, dummy_inputs, rng, args\n )\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n seed=args.seed,\n )\n step = 0\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _mask_rng = jax.random.split(rng, 3)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(\n videos=videos,\n dropout_rng=_rng,\n mask_rng=_mask_rng,\n )\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log and jax.process_index() == 0:\n if step % args.log_interval == 0:\n wandb.log({""loss"": loss, ""step"": step, **metrics})\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n 
recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""genie_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab
8,48073,"train_dynamics.py",6425,0,"",python,selection_command
9,50180,"train_tokenizer.py",0,0,"from dataclasses import dataclass\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nimport tyro\nimport wandb\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n checkpoint: str = """"\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 3e-4\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\ndef create_training_functions(args):\n """"""Create training functions with args captured in closure.""""""\n \n def tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params, inputs, training=True, rngs={""dropout"": inputs[""rng""]}\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n @jax.jit\n def train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n \n return tokenizer_loss_fn, train_step\n\n\nif __name__ == ""__main__"":\n args = tyro.cli(Args)\n \n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n 
print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(entity=args.entity, project=args.project, group=""debug"", config=args)\n\n tokenizer_loss_fn, train_step = create_training_functions(args)\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer_<timestamp>_<step>\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n tfrecord_files = sorted(\n [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n )\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n seed=args.seed,\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n # --- Start timing from dataloading through training ---\n start_time = time.time()\n \n for videos in dataloader:\n # --- Train step ---\n rng, _rng = jax.random.split(rng)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(videos=videos, rng=_rng)\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n \n # --- End timing after train step ---\n elapsed_time = (time.time() - start_time) * 1000\n print(f""Step {step}, loss: {loss}, total time (dataloading + training): {elapsed_time}ms"")\n step += 1\n\n # --- Logging ---\n if args.log and jax.process_index() == 0:\n if step % args.log_interval == 0:\n wandb.log({""loss"": loss, 
""step"": step, ""step_time_ms"": elapsed_time, **metrics})\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""tokenizer_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n start_time = time.time()\n",python,tab
10,50183,"train_tokenizer.py",1438,0,"",python,selection_command
11,66302,"train_tokenizer.py",6963,0,"",python,selection_mouse
12,66638,"train_tokenizer.py",6945,64," # --- Start timing from dataloading through training ---",python,selection_command
13,66997,"train_tokenizer.py",6945,97," # --- Start timing from dataloading through training ---\n start_time = time.time()",python,selection_command
14,67184,"train_tokenizer.py",6945,106," # --- Start timing from dataloading through training ---\n start_time = time.time()\n ",python,selection_command
15,67335,"train_tokenizer.py",6945,107,"",python,content
16,67363,"train_tokenizer.py",6953,0,"",python,selection_command
17,68265,"train_tokenizer.py",6920,0,"",python,selection_command
18,69139,"Untitled-1",0,0,"",plaintext,tab
19,70100,"train_tokenizer.py",0,0,"",python,tab
20,73237,"train_tokenizer.py",0,0,"",python,tab
21,73944,"train_tokenizer.py",6858,0,"",python,selection_command
22,74236,"train_tokenizer.py",6877,0,"",python,selection_command
23,74412,"train_tokenizer.py",6929,0,"",python,selection_command
24,74667,"train_tokenizer.py",6945,0," # --- Start timing from dataloading through training ---\n start_time = time.time()\n \n",python,content
25,75782,"train_tokenizer.py",8633,0,"",python,selection_command
26,77250,"train_tokenizer.py",9326,0,"",python,selection_command
27,78385,"train_tokenizer.py",9289,0,"",python,selection_command
28,78632,"train_tokenizer.py",9267,0,"",python,selection_command
29,78660,"train_tokenizer.py",9228,0,"",python,selection_command
30,78684,"train_tokenizer.py",9210,0,"",python,selection_command
31,78716,"train_tokenizer.py",9169,0,"",python,selection_command
32,78754,"train_tokenizer.py",9143,0,"",python,selection_command
33,78794,"train_tokenizer.py",9055,0,"",python,selection_command
34,78823,"train_tokenizer.py",9014,0,"",python,selection_command
35,78848,"train_tokenizer.py",8946,0,"",python,selection_command
36,78896,"train_tokenizer.py",8871,0,"",python,selection_command
37,78920,"train_tokenizer.py",8825,0,"",python,selection_command
38,78984,"train_tokenizer.py",8768,0,"",python,selection_command
39,79008,"train_tokenizer.py",8726,0,"",python,selection_command
40,79028,"train_tokenizer.py",8704,0,"",python,selection_command
41,79056,"train_tokenizer.py",8677,0,"",python,selection_command
42,79082,"train_tokenizer.py",8605,0,"",python,selection_command
43,79128,"train_tokenizer.py",8554,0,"",python,selection_command
44,79152,"train_tokenizer.py",8485,0,"",python,selection_command
45,79187,"train_tokenizer.py",8419,0,"",python,selection_command
46,79217,"train_tokenizer.py",8380,0,"",python,selection_command
47,79263,"train_tokenizer.py",8358,0,"",python,selection_command
48,79291,"train_tokenizer.py",8289,0,"",python,selection_command
49,79323,"train_tokenizer.py",8234,0,"",python,selection_command
50,79368,"train_tokenizer.py",8152,0,"",python,selection_command
51,79391,"train_tokenizer.py",8100,0,"",python,selection_command
52,79416,"train_tokenizer.py",8051,0,"",python,selection_command
53,79460,"train_tokenizer.py",7995,0,"",python,selection_command
54,79491,"train_tokenizer.py",7894,0,"",python,selection_command
55,79529,"train_tokenizer.py",7844,0,"",python,selection_command
56,79561,"train_tokenizer.py",7790,0,"",python,selection_command
57,79588,"train_tokenizer.py",7760,0,"",python,selection_command
58,79633,"train_tokenizer.py",7759,0,"",python,selection_command
59,79656,"train_tokenizer.py",7737,0,"",python,selection_command
60,79692,"train_tokenizer.py",7634,0,"",python,selection_command
61,79724,"train_tokenizer.py",7573,0,"",python,selection_command
62,79763,"train_tokenizer.py",7523,0,"",python,selection_command
63,79791,"train_tokenizer.py",7510,0,"",python,selection_command
64,79832,"train_tokenizer.py",7430,0,"",python,selection_command
65,79864,"train_tokenizer.py",7379,0,"",python,selection_command
66,79892,"train_tokenizer.py",7378,0,"",python,selection_command
67,79931,"train_tokenizer.py",7293,0,"",python,selection_command
68,79967,"train_tokenizer.py",7279,0,"",python,selection_command
69,79996,"train_tokenizer.py",7211,0,"",python,selection_command
70,80032,"train_tokenizer.py",7166,0,"",python,selection_command
71,80068,"train_tokenizer.py",7165,0,"",python,selection_command
72,80105,"train_tokenizer.py",7119,0,"",python,selection_command
73,80143,"train_tokenizer.py",7086,0,"",python,selection_command
74,80169,"train_tokenizer.py",7052,0,"",python,selection_command
75,80207,"train_tokenizer.py",7043,0,"",python,selection_command
76,80236,"train_tokenizer.py",7010,0,"",python,selection_command
77,82753,"train_tokenizer.py",7056,29,"",python,content
78,82753,"train_tokenizer.py",7051,1,"",python,content
79,82753,"train_tokenizer.py",7018,0," ",python,content
80,82754,"train_tokenizer.py",6953,0,"for videos in dataloader:\n ",python,content
81,84196,"train_tokenizer.py",6979,0,"",python,selection_command
82,84277,"train_tokenizer.py",6945,0,"",python,selection_command
83,84383,"train_tokenizer.py",6912,0,"",python,selection_command
84,84557,"train_tokenizer.py",6860,0,"",python,selection_command
85,84850,"train_tokenizer.py",6912,0,"",python,selection_command
86,85901,"train_tokenizer.py",6945,0,"",python,selection_command
87,86042,"train_tokenizer.py",6979,0,"",python,selection_command
88,86902,"train_tokenizer.py",7048,0,"",python,selection_command
89,87549,"train_tokenizer.py",7085,0,"",python,selection_command
90,87964,"train_tokenizer.py",7048,0,"",python,selection_command
91,88416,"train_tokenizer.py",7085,0,"",python,selection_command
92,88681,"train_tokenizer.py",7085,12," ",python,selection_command
93,88772,"train_tokenizer.py",7048,49," start_time = time.time()\n ",python,selection_command
94,88911,"train_tokenizer.py",6979,118," # --- Start timing from dataloading through training ---\n start_time = time.time()\n ",python,selection_command
95,89022,"train_tokenizer.py",6979,119,"",python,content
96,89036,"train_tokenizer.py",6991,0,"",python,selection_command
97,89860,"train_tokenizer.py",6957,0,"",python,selection_command
98,90316,"train_tokenizer.py",6924,0,"",python,selection_command
99,90320,"train_tokenizer.py",6957,0,"",python,selection_command
100,91443,"train_tokenizer.py",6991,0,"",python,selection_command
101,91638,"train_tokenizer.py",7024,0,"",python,selection_command
102,92015,"train_tokenizer.py",6991,0,"",python,selection_command
103,93099,"train_tokenizer.py",7024,0,"",python,selection_command
104,93348,"train_tokenizer.py",7058,0,"",python,selection_command
105,93365,"train_tokenizer.py",7071,0,"",python,selection_command
106,93407,"train_tokenizer.py",7116,0,"",python,selection_command
107,93439,"train_tokenizer.py",7184,0,"",python,selection_command
108,93465,"train_tokenizer.py",7198,0,"",python,selection_command
109,93504,"train_tokenizer.py",7271,0,"",python,selection_command
110,93536,"train_tokenizer.py",7284,0,"",python,selection_command
111,93568,"train_tokenizer.py",7335,0,"",python,selection_command
112,93608,"train_tokenizer.py",7414,0,"",python,selection_command
113,93644,"train_tokenizer.py",7428,0,"",python,selection_command
114,93956,"train_tokenizer.py",7414,0,"",python,selection_command
115,94133,"train_tokenizer.py",7403,12," ",python,selection_command
116,94339,"train_tokenizer.py",7403,62," \n # --- End timing after train step ---",python,selection_command
117,94588,"train_tokenizer.py",7403,123," \n # --- End timing after train step ---\n elapsed_time = (time.time() - start_time) * 1000",python,selection_command
118,94948,"train_tokenizer.py",7403,226," \n # --- End timing after train step ---\n elapsed_time = (time.time() - start_time) * 1000\n print(f""Step {step}, loss: {loss}, total time (dataloading + training): {elapsed_time}ms"")",python,selection_command
119,96750,"train_tokenizer.py",7403,227,"",python,content
120,96764,"train_tokenizer.py",7415,0,"",python,selection_command
121,97612,"train_tokenizer.py",7335,0,"",python,selection_command
122,98524,"train_tokenizer.py",7425,0,"",python,selection_command
123,98916,"train_tokenizer.py",7438,0,"",python,selection_command
124,99189,"train_tokenizer.py",7468,0,"",python,selection_command
125,99193,"train_tokenizer.py",7522,0,"",python,selection_command
126,99320,"train_tokenizer.py",7572,0,"",python,selection_command
127,100625,"train_tokenizer.py",7619,30,"",python,content
128,101864,"train_tokenizer.py",7522,0,"",python,selection_command
129,102947,"train_tokenizer.py",7572,0,"",python,selection_command
130,103688,"train_tokenizer.py",8962,0,"",python,selection_command
131,104057,"train_tokenizer.py",8925,0,"",python,selection_command
132,104325,"train_tokenizer.py",8925,37,"",python,content
133,105007,"train_tokenizer.py",8903,0,"",python,selection_command
134,106721,"train_tokenizer.py",7438,0,"",python,selection_command
135,107443,"train_tokenizer.py",7425,0,"",python,selection_command
136,107693,"train_tokenizer.py",7415,0,"",python,selection_command
137,107697,"train_tokenizer.py",7335,0,"",python,selection_command
138,107728,"train_tokenizer.py",7284,0,"",python,selection_command
139,107768,"train_tokenizer.py",7271,0,"",python,selection_command
140,107833,"train_tokenizer.py",7198,0,"",python,selection_command
141,107836,"train_tokenizer.py",7184,0,"",python,selection_command
142,107875,"train_tokenizer.py",7116,0,"",python,selection_command
143,107896,"train_tokenizer.py",7071,0,"",python,selection_command
144,107939,"train_tokenizer.py",7058,0,"",python,selection_command
145,107964,"train_tokenizer.py",7024,0,"",python,selection_command
146,109973,"train_tokenizer.py",0,0,"",python,tab
147,109976,"train_tokenizer.py",1438,0,"",python,selection_command
148,119550,"train_tokenizer.py",0,0,"from dataclasses import dataclass\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nimport tyro\nimport wandb\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n checkpoint: str = """"\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 3e-4\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params, inputs, training=True, rngs={""dropout"": inputs[""rng""]}\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\[email protected]\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} 
must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(entity=args.entity, project=args.project, group=""debug"", config=args)\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer_<timestamp>_<step>\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n tfrecord_files = sorted(\n [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n )\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng = jax.random.split(rng)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(videos=videos, rng=_rng)\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log and jax.process_index() == 0:\n if step % args.log_interval == 0:\n wandb.log({""loss"": loss, ""step"": step, **metrics})\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n 
),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""tokenizer_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab
149,119551,"train_tokenizer.py",7012,0,"",python,selection_mouse
150,119564,"train_tokenizer.py",7011,0,"",python,selection_command
151,121459,"train_tokenizer.py",0,0,"",python,tab
152,121459,"train_tokenizer.py",7341,0,"",python,selection_mouse
153,121994,"train_tokenizer.py",7402,0,"\n ",python,content
154,122073,"train_tokenizer.py",7403,12,"",python,content
155,122161,"train_tokenizer.py",7403,0,"\n print(f""Step {step}, loss: {loss}"")",python,content
156,122186,"train_tokenizer.py",7416,0,"",python,selection_command
157,122520,"train_tokenizer.py",7403,0,"",python,selection_command
158,122857,"train_tokenizer.py",7403,1,"",python,content
159,122864,"train_tokenizer.py",7415,0,"",python,selection_command
160,123914,"train_tokenizer.py",7335,0,"",python,selection_command
161,125957,"train_tokenizer.py",0,0,"",python,tab
162,126804,"train_tokenizer.py",7058,0,"",python,selection_command
163,126945,"train_tokenizer.py",7071,0,"",python,selection_command
164,127208,"train_tokenizer.py",7116,0,"",python,selection_command
165,127246,"train_tokenizer.py",7184,0,"",python,selection_command
166,127259,"train_tokenizer.py",7198,0,"",python,selection_command
167,127300,"train_tokenizer.py",7271,0,"",python,selection_command
168,127332,"train_tokenizer.py",7284,0,"",python,selection_command
169,127355,"train_tokenizer.py",7335,0,"",python,selection_command
170,127547,"train_tokenizer.py",7415,0,"",python,selection_command
171,128816,"train_tokenizer.py",7335,0,"",python,selection_command
172,129954,"train_dynamics.py",0,0,"",python,tab
173,135187,"train_tokenizer.py",0,0,"",python,tab
174,135189,"train_tokenizer.py",1438,0,"",python,selection_command
175,163796,"train_tokenizer.py",1542,2164,"\n def tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params, inputs, training=True, rngs={""dropout"": inputs[""rng""]}\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n @jax.jit\n def train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n return tokenizer_loss_fn, train_step\n\n\nif __name__ == ""__main__"":\n args = tyro.cli(Args)\n\n",python,content
176,165840,"vscode.git.Git",0,0,"2025-06-25 00:35:32.896 [info] [main] Log level: Info\n2025-06-25 00:35:32.897 [info] [main] Validating found git in: ""git""\n2025-06-25 00:35:32.897 [info] [main] Using git ""2.43.5"" from ""git""\n2025-06-25 00:35:32.897 [info] [Model][doInitialScan] Initial repository scan started\n2025-06-25 00:35:32.897 [info] > git rev-parse --show-toplevel [9ms]\n2025-06-25 00:35:32.897 [info] > git rev-parse --path-format=relative --show-toplevel [25ms]\n2025-06-25 00:35:32.897 [info] > git rev-parse --git-dir --git-common-dir [10ms]\n2025-06-25 00:35:32.897 [info] [Model][openRepository] Opened repository: /lustre/groups/haicu/workspace/franz.srambical/jafar\n2025-06-25 00:35:32.897 [info] > git config --get commit.template [27ms]\n2025-06-25 00:35:32.897 [info] > git rev-parse --show-toplevel [13ms]\n2025-06-25 00:35:32.897 [info] > git rev-parse --path-format=relative --show-toplevel [5ms]\n2025-06-25 00:35:32.897 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [20ms]\n2025-06-25 00:35:32.897 [info] > git rev-parse --show-toplevel [18ms]\n2025-06-25 00:35:32.897 [info] > git status -z -uall [16ms]\n2025-06-25 00:35:32.897 [info] > git rev-parse --path-format=relative --show-toplevel [6ms]\n2025-06-25 00:35:32.897 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [22ms]\n2025-06-25 00:35:32.897 [info] > git rev-parse --show-toplevel [14ms]\n2025-06-25 00:35:32.897 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [13ms]\n2025-06-25 00:35:32.897 [info] > git config --get commit.template [8ms]\n2025-06-25 00:35:32.897 [info] > git rev-parse --path-format=relative --show-toplevel [12ms]\n2025-06-25 00:35:32.897 [info] > git rev-parse --show-toplevel [7ms]\n2025-06-25 00:35:32.897 [info] > git config --local branch.seeded-episode-sampling.vscode-merge-base [16ms]\n2025-06-25 00:35:32.897 [info] > git rev-parse --path-format=relative --show-toplevel [9ms]\n2025-06-25 00:35:32.897 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [14ms]\n2025-06-25 00:35:32.897 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/origin/seeded-episode-sampling refs/remotes/origin/seeded-episode-sampling [955ms]\n2025-06-25 00:35:32.897 [info] > git rev-parse --show-toplevel [957ms]\n2025-06-25 00:35:32.898 [info] > git merge-base refs/heads/seeded-episode-sampling refs/remotes/origin/seeded-episode-sampling [31ms]\n2025-06-25 00:35:32.907 [info] > git rev-parse --path-format=relative --show-toplevel [33ms]\n2025-06-25 00:35:32.911 [info] > git status -z -uall [27ms]\n2025-06-25 00:35:32.921 [info] > git diff --name-status -z --diff-filter=ADMR 9e8a3860d8270b2dbf00311f48a6f8bf621b1aca...refs/remotes/origin/seeded-episode-sampling [14ms]\n2025-06-25 00:35:32.921 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [28ms]\n2025-06-25 00:35:32.926 [info] > git rev-parse 
--show-toplevel [6ms]\n2025-06-25 00:35:32.946 [info] > git rev-parse --path-format=relative --show-toplevel [15ms]\n2025-06-25 00:35:32.955 [info] > git rev-parse --show-toplevel [4ms]\n2025-06-25 00:35:32.967 [info] > git rev-parse --path-format=relative --show-toplevel [6ms]\n2025-06-25 00:35:32.984 [info] > git rev-parse --show-toplevel [10ms]\n2025-06-25 00:35:32.996 [info] > git rev-parse --path-format=relative --show-toplevel [6ms]\n2025-06-25 00:35:33.014 [info] > git rev-parse --show-toplevel [10ms]\n2025-06-25 00:35:33.026 [info] > git rev-parse --path-format=relative --show-toplevel [5ms]\n2025-06-25 00:35:33.037 [info] > git rev-parse --show-toplevel [4ms]\n2025-06-25 00:35:33.052 [info] > git rev-parse --path-format=relative --show-toplevel [5ms]\n2025-06-25 00:35:33.055 [info] [Model][doInitialScan] Initial repository scan completed - repositories (1), closed repositories (0), parent repositories (0), unsafe repositories (0)\n2025-06-25 00:35:33.339 [info] > git fetch [1579ms]\n2025-06-25 00:35:33.365 [info] > git config --get commit.template [16ms]\n2025-06-25 00:35:33.373 [info] > git symbolic-ref --short refs/remotes/origin/HEAD [18ms]\n2025-06-25 00:35:33.491 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [118ms]\n2025-06-25 00:35:33.537 [info] > git check-ignore -v -z --stdin [53ms]\n2025-06-25 00:35:33.539 [info] > git show --textconv :train_lam.py [39ms]\n2025-06-25 00:35:33.540 [info] > git status -z -uall [27ms]\n2025-06-25 00:35:33.542 [info] > git ls-files --stage -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_lam.py [35ms]\n2025-06-25 00:35:33.554 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [33ms]\n2025-06-25 00:35:33.682 [info] > git cat-file -s 146d6b28f77470c22c6c6e602314c0eab8b3ddc7 [130ms]\n2025-06-25 00:35:34.083 [info] > git blame --root --incremental 474b9286d6d5b184c2eaaaf0bb077e1fa37d17fd -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_lam.py [73ms]\n2025-06-25 00:35:36.240 [info] > git config --local branch.seeded-episode-sampling.github-pr-owner-number [7ms]\n2025-06-25 00:35:36.241 [warning] [Git][config] git config failed: Failed to execute git\n2025-06-25 00:35:38.986 [info] > git config --global user.name [5ms]\n2025-06-25 00:35:38.996 [info] > git config --global user.email [3ms]\n2025-06-25 00:35:38.996 [info] [main] Stored git author name in global state: Franz Srambical <[email protected]>\n2025-06-25 00:36:17.120 [info] > git log --format=%H%n%aN%n%aE%n%at%n%ct%n%P%n%D%n%B -z --shortstat --diff-merges=first-parent -n50 --skip=0 --topo-order --decorate=full --stdin [67ms]\n2025-06-25 00:36:18.319 [info] > git show --textconv HEAD:train_lam.py [12ms]\n2025-06-25 00:36:18.326 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_lam.py [11ms]\n2025-06-25 00:36:20.578 [info] > git show --textconv HEAD:train_dynamics.py [13ms]\n2025-06-25 00:36:20.585 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_dynamics.py [11ms]\n2025-06-25 00:36:20.930 [info] > git blame --root --incremental 474b9286d6d5b184c2eaaaf0bb077e1fa37d17fd -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_dynamics.py [19ms]\n2025-06-25 00:36:22.669 [info] > git show --textconv HEAD:train_tokenizer.py [13ms]\n2025-06-25 00:36:22.675 [info] > 
git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [10ms]\n2025-06-25 00:36:23.042 [info] > git blame --root --incremental 474b9286d6d5b184c2eaaaf0bb077e1fa37d17fd -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [22ms]\n2025-06-25 00:36:42.950 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [11ms]\n2025-06-25 00:36:46.390 [info] > git show --textconv :train_tokenizer.py [12ms]\n2025-06-25 00:36:46.396 [info] > git ls-files --stage -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [9ms]\n2025-06-25 00:36:46.410 [info] > git cat-file -s 78a39cc762967d4e4d8d4b77d06436da1d5dfcab [7ms]\n2025-06-25 00:37:07.312 [info] > git config --get commit.template [11ms]\n2025-06-25 00:37:07.324 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [14ms]\n2025-06-25 00:37:07.352 [info] > git status -z -uall [16ms]\n2025-06-25 00:37:07.368 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [22ms]\n2025-06-25 00:37:20.452 [info] > git config --get commit.template [11ms]\n2025-06-25 00:37:20.466 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [14ms]\n2025-06-25 00:37:20.490 [info] > git status -z -uall [15ms]\n2025-06-25 00:37:20.502 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [21ms]\n2025-06-25 00:37:22.734 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [9ms]\n2025-06-25 00:37:32.415 [info] > git blame --root --incremental 474b9286d6d5b184c2eaaaf0bb077e1fa37d17fd -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [25ms]\n2025-06-25 00:37:39.144 [info] > git show --textconv :train_tokenizer.py [11ms]\n2025-06-25 00:37:39.150 [info] > git ls-files --stage -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [7ms]\n2025-06-25 00:37:39.163 [info] > git cat-file -s 78a39cc762967d4e4d8d4b77d06436da1d5dfcab [5ms]\n2025-06-25 00:37:43.191 [info] > git show --textconv :train_dynamics.py [25ms]\n2025-06-25 00:37:43.207 [info] > git ls-files --stage -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_dynamics.py [29ms]\n2025-06-25 00:37:43.222 [info] > git cat-file -s dbc227d8fd3182f9fee05ea8e52a1069189bde1d [6ms]\n2025-06-25 00:37:43.816 [info] > git config --get commit.template [10ms]\n2025-06-25 00:37:43.830 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [14ms]\n2025-06-25 00:37:43.851 [info] > git status -z -uall [13ms]\n2025-06-25 00:37:43.872 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [23ms]\n2025-06-25 00:37:45.271 [info] > git config --get commit.template [11ms]\n2025-06-25 00:37:45.283 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling 
refs/remotes/seeded-episode-sampling [15ms]\n2025-06-25 00:37:45.306 [info] > git status -z -uall [14ms]\n2025-06-25 00:37:45.321 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [22ms]\n2025-06-25 00:37:46.652 [info] > git show --textconv HEAD:train_tokenizer.py [13ms]\n2025-06-25 00:37:46.661 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [13ms]\n2025-06-25 00:37:56.987 [info] > git add -A -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [14ms]\n2025-06-25 00:37:57.008 [info] > git config --get commit.template [8ms]\n2025-06-25 00:37:57.023 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [15ms]\n2025-06-25 00:37:57.049 [info] > git status -z -uall [16ms]\n2025-06-25 00:37:57.062 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [21ms]\n2025-06-25 00:37:57.289 [info] > git diff --no-color [19ms]\n2025-06-25 00:37:57.766 [info] > git show --textconv HEAD:train_tokenizer.py [20ms]\n2025-06-25 00:37:57.767 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [10ms]\n2025-06-25 00:37:58.253 [info] > git config --get commit.template [10ms]\n2025-06-25 00:37:58.267 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [15ms]\n2025-06-25 00:37:58.294 [info] > git status -z -uall [15ms]\n2025-06-25 00:37:58.309 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [22ms]\n2025-06-25 00:37:58.542 [info] > git ls-files --stage -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [9ms]\n2025-06-25 00:37:58.553 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [11ms]\n2025-06-25 00:37:58.560 [info] > git cat-file -s 08294a61b35de2a5fa21c8e79aea18283358ceb6 [9ms]\n2025-06-25 00:37:58.760 [info] > git show --textconv HEAD:train_tokenizer.py [19ms]\n2025-06-25 00:37:58.769 [info] > git show --textconv :train_tokenizer.py [9ms]\n2025-06-25 00:38:03.351 [info] > git config --get commit.template [24ms]\n2025-06-25 00:38:03.354 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [14ms]\n2025-06-25 00:38:03.381 [info] > git status -z -uall [18ms]\n2025-06-25 00:38:03.389 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [18ms]\n2025-06-25 00:38:15.274 [info] > git -c user.useConfigOnly=true commit --quiet --allow-empty-message --file - [1067ms]\n2025-06-25 00:38:15.274 [info] [WARNING] Unstaged files detected.\n[INFO] Stashing unstaged files to /ictstr01/home/aih/franz.srambical/.cache/pre-commit/patch1750804694-3444258.\nblack....................................................................Failed\n- hook id: black\n- files were modified by this hook\n\nreformatted train_tokenizer.py\n\nAll done! 
✨ 🍰 ✨\n1 file reformatted.\n\n[INFO] Restored changes from /ictstr01/home/aih/franz.srambical/.cache/pre-commit/patch1750804694-3444258.\n2025-06-25 00:38:15.291 [info] > git config --get-all user.name [7ms]\n2025-06-25 00:38:15.303 [info] > git config --get-all user.email [5ms]\n2025-06-25 00:38:15.321 [info] > git config --get commit.template [9ms]\n2025-06-25 00:38:15.335 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [15ms]\n2025-06-25 00:38:15.362 [info] > git status -z -uall [18ms]\n2025-06-25 00:38:15.376 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [23ms]\n2025-06-25 00:38:16.289 [info] > git ls-files --stage -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [9ms]\n2025-06-25 00:38:16.296 [info] > git diff --no-color [29ms]\n2025-06-25 00:38:16.299 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [10ms]\n2025-06-25 00:38:16.303 [info] > git cat-file -s 08294a61b35de2a5fa21c8e79aea18283358ceb6 [8ms]\n2025-06-25 00:38:16.601 [info] > git show --textconv HEAD:train_tokenizer.py [12ms]\n2025-06-25 00:38:16.608 [info] > git show --textconv :train_tokenizer.py [10ms]\n2025-06-25 00:38:17.293 [info] > git config --get commit.template [12ms]\n2025-06-25 00:38:17.308 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [16ms]\n2025-06-25 00:38:17.345 [info] > git status -z -uall [26ms]\n2025-06-25 00:38:17.346 [info] > git ls-files --stage -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [8ms]\n2025-06-25 00:38:17.355 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [10ms]\n2025-06-25 00:38:17.356 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [28ms]\n2025-06-25 00:38:17.363 [info] > git cat-file -s 08294a61b35de2a5fa21c8e79aea18283358ceb6 [9ms]\n2025-06-25 00:38:17.567 [info] > git show --textconv HEAD:train_tokenizer.py [13ms]\n2025-06-25 00:38:17.577 [info] > git show --textconv :train_tokenizer.py [11ms]\n",log,tab
177,168360,"vscode.git.Git",16332,0,"2025-06-25 00:38:20.780 [info] > git add -A -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [32ms]\n2025-06-25 00:38:20.802 [info] > git config --get commit.template [10ms]\n2025-06-25 00:38:20.817 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [16ms]\n2025-06-25 00:38:20.848 [info] > git status -z -uall [19ms]\n2025-06-25 00:38:20.859 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [21ms]\n",log,content
178,168546,"vscode.git.Git",16973,0,"2025-06-25 00:38:21.077 [info] > git diff --no-color [15ms]\n",log,content
179,169868,"vscode.git.Git",17033,0,"2025-06-25 00:38:22.320 [info] > git ls-files --stage -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [10ms]\n2025-06-25 00:38:22.331 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [11ms]\n2025-06-25 00:38:22.341 [info] > git cat-file -s e0b223487cd0fd5e6b69aae9a58441a50592c4db [10ms]\n2025-06-25 00:38:22.379 [info] > git config --get commit.template [10ms]\n2025-06-25 00:38:22.392 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [14ms]\n2025-06-25 00:38:22.419 [info] > git status -z -uall [17ms]\n2025-06-25 00:38:22.434 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [23ms]\n",log,content
180,170093,"vscode.git.Git",17916,0,"2025-06-25 00:38:22.531 [info] > git show --textconv HEAD:train_tokenizer.py [11ms]\n2025-06-25 00:38:22.539 [info] > git show --textconv :train_tokenizer.py [9ms]\n",log,content
181,171073,"vscode.git.Git",18079,0,"2025-06-25 00:38:23.538 [info] > git -c user.useConfigOnly=true commit --quiet --allow-empty-message --file - [739ms]\n2025-06-25 00:38:23.538 [info] [WARNING] Unstaged files detected.\n[INFO] Stashing unstaged files to /ictstr01/home/aih/franz.srambical/.cache/pre-commit/patch1750804703-3444332.\nblack....................................................................Passed\n[INFO] Restored changes from /ictstr01/home/aih/franz.srambical/.cache/pre-commit/patch1750804703-3444332.\n2025-06-25 00:38:23.555 [info] > git config --get commit.template [6ms]\n2025-06-25 00:38:23.576 [info] > git config --get commit.template [10ms]\n2025-06-25 00:38:23.592 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [17ms]\n2025-06-25 00:38:23.622 [info] > git status -z -uall [18ms]\n2025-06-25 00:38:23.637 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [24ms]\n2025-06-25 00:38:23.664 [info] > git merge-base refs/heads/seeded-episode-sampling refs/remotes/origin/seeded-episode-sampling [12ms]\n2025-06-25 00:38:23.684 [info] > git diff --name-status -z --diff-filter=ADMR 9e8a3860d8270b2dbf00311f48a6f8bf621b1aca...refs/remotes/origin/seeded-episode-sampling [12ms]\n",log,content
182,171252,"vscode.git.Git",19455,0,"2025-06-25 00:38:23.889 [info] > git config --local branch.seeded-episode-sampling.github-pr-owner-number [11ms]\n2025-06-25 00:38:23.889 [warning] [Git][config] git config failed: Failed to execute git\n2025-06-25 00:38:23.890 [info] > git diff --no-color [20ms]\n",log,content
183,171440,"vscode.git.Git",19717,0,"2025-06-25 00:38:23.900 [info] > git config --get commit.template [11ms]\n2025-06-25 00:38:23.913 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [14ms]\n2025-06-25 00:38:23.929 [info] > git log --format=%H%n%aN%n%aE%n%at%n%ct%n%P%n%D%n%B -z --shortstat --diff-merges=first-parent -n50 --skip=0 --topo-order --decorate=full --stdin [67ms]\n2025-06-25 00:38:23.931 [info] > git status -z -uall [12ms]\n2025-06-25 00:38:23.950 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [23ms]\n",log,content
184,172680,"vscode.git.Git",20417,0,"2025-06-25 00:38:25.145 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [11ms]\n2025-06-25 00:38:25.153 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/train_tokenizer.py [11ms]\n",log,content
185,172860,"vscode.git.Git",20687,0,"2025-06-25 00:38:25.349 [info] > git show --textconv HEAD:train_tokenizer.py [17ms]\n2025-06-25 00:38:25.360 [info] > git show --textconv HEAD:train_tokenizer.py [12ms]\n",log,content
186,173049,"train_tokenizer.py",0,0,"",python,tab
187,173310,"train_dynamics.py",0,0,"",python,tab
188,173311,"train_dynamics.py",6425,0,"",python,selection_command
189,174394,"train_lam.py",0,0,"",python,tab
190,174395,"train_lam.py",6991,0,"",python,selection_command
191,175731,"utils/nn.py",0,0,"import math\nfrom typing import Dict, Tuple\n\nfrom flax import linen as nn\nimport jax\nimport jax.numpy as jnp\n\n\nclass PositionalEncoding(nn.Module):\n """"""https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/JAX/tutorial6/Transformers_and_MHAttention.html""""""\n\n d_model: int # Hidden dimensionality of the input.\n max_len: int = 5000 # Maximum length of a sequence to expect.\n\n def setup(self):\n # Create matrix of [SeqLen, HiddenDim] representing the positional encoding for max_len inputs\n self.pe = jnp.zeros((self.max_len, self.d_model))\n position = jnp.arange(0, self.max_len, dtype=jnp.float32)[:, None]\n div_term = jnp.exp(\n jnp.arange(0, self.d_model, 2) * (-math.log(10000.0) / self.d_model)\n )\n self.pe = self.pe.at[:, 0::2].set(jnp.sin(position * div_term))\n self.pe = self.pe.at[:, 1::2].set(jnp.cos(position * div_term))\n\n def __call__(self, x):\n x = x + self.pe[: x.shape[2]]\n return x\n\n\nclass STBlock(nn.Module):\n dim: int\n num_heads: int\n dropout: float\n\n @nn.remat\n @nn.compact\n def __call__(self, x: jax.Array) -> jax.Array:\n # --- Spatial attention ---\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm()(z)\n z = nn.MultiHeadAttention(\n num_heads=self.num_heads,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n deterministic=False,\n )(z)\n x = x + z\n\n # --- Temporal attention ---\n x = x.swapaxes(1, 2)\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm()(z)\n causal_mask = jnp.tri(z.shape[-2])\n z = nn.MultiHeadAttention(\n num_heads=self.num_heads,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n deterministic=False,\n )(z, mask=causal_mask)\n x = x + z\n x = x.swapaxes(1, 2)\n\n # --- Feedforward ---\n z = nn.LayerNorm()(x)\n z = nn.Dense(self.dim)(z)\n z = nn.gelu(z)\n x = x + z\n\n return x\n\n\nclass STTransformer(nn.Module):\n model_dim: int\n out_dim: int\n num_blocks: int\n num_heads: int\n dropout: float\n\n @nn.compact\n def __call__(self, x: jax.Array) -> jax.Array:\n x = nn.Sequential(\n [\n nn.LayerNorm(),\n nn.Dense(self.model_dim),\n nn.LayerNorm(),\n ]\n )(x)\n for _ in range(self.num_blocks):\n x = STBlock(\n dim=self.model_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n )(x)\n x = nn.Dense(self.out_dim)(x)\n return x # (B, T, E)\n\n\ndef normalize(x):\n return x / (jnp.linalg.norm(x, ord=2, axis=-1, keepdims=True) + 1e-8)\n\n\nclass VectorQuantizer(nn.Module):\n latent_dim: int\n num_latents: int\n dropout: float\n\n def setup(self):\n self.codebook = normalize(\n self.param(\n ""codebook"",\n nn.initializers.lecun_uniform(),\n (self.num_latents, self.latent_dim),\n )\n )\n self.drop = nn.Dropout(self.dropout, deterministic=False)\n\n def __call__(self, x: jax.Array, training: bool) -> Tuple[jax.Array, jax.Array, jax.Array, jax.Array]:\n # --- Compute distances ---\n x = normalize(x)\n codebook = normalize(self.codebook)\n distance = -jnp.matmul(x, codebook.T)\n if training:\n distance = self.drop(distance)\n\n # --- Get indices and embeddings ---\n indices = jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]\n\n # --- Straight through estimator ---\n z_q = x + jax.lax.stop_gradient(z - x)\n return z_q, z, x, indices\n\n def get_codes(self, indices: jax.Array):\n return self.codebook[indices]\n",python,tab
192,175732,"utils/nn.py",1420,0,"",python,selection_command
193,204437,"utils/nn.py",3209,107," def __call__(\n self, x: jax.Array, training: bool\n ) -> Tuple[jax.Array, jax.Array, jax.Array, jax.Array]:\n",python,content
194,206308,"vscode.git.Git",0,0,"",log,tab
195,209498,"vscode.git.Git",28469,0,"2025-06-25 00:39:01.714 [info] > git config --get commit.template [10ms]\n2025-06-25 00:39:01.729 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [16ms]\n2025-06-25 00:39:01.757 [info] > git status -z -uall [17ms]\n2025-06-25 00:39:01.773 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [22ms]\n",log,content
196,209889,"vscode.git.Git",28984,0,"2025-06-25 00:39:02.353 [info] > git add -A -- /lustre/groups/haicu/workspace/franz.srambical/jafar/utils/nn.py [15ms]\n2025-06-25 00:39:02.373 [info] > git config --get commit.template [9ms]\n2025-06-25 00:39:02.390 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [17ms]\n2025-06-25 00:39:02.417 [info] > git status -z -uall [18ms]\n2025-06-25 00:39:02.431 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [24ms]\n",log,content
197,210096,"vscode.git.Git",29617,0,"2025-06-25 00:39:02.656 [info] > git diff --no-color [16ms]\n",log,content
198,211618,"vscode.git.Git",29677,0,"2025-06-25 00:39:03.936 [info] > git ls-files --stage -- /lustre/groups/haicu/workspace/franz.srambical/jafar/utils/nn.py [13ms]\n2025-06-25 00:39:03.946 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/utils/nn.py [15ms]\n2025-06-25 00:39:03.955 [info] > git cat-file -s 2b106612ba0c4615ee46876eff4e52dcf8d121aa [10ms]\n",log,content
199,211837,"vscode.git.Git",30031,0,"2025-06-25 00:39:04.173 [info] > git show --textconv HEAD:utils/nn.py [11ms]\n2025-06-25 00:39:04.183 [info] > git show --textconv :utils/nn.py [10ms]\n",log,content
200,212529,"vscode.git.Git",30181,0,"2025-06-25 00:39:04.912 [info] > git -c user.useConfigOnly=true commit --quiet --allow-empty-message --file - [767ms]\n2025-06-25 00:39:04.912 [info] [WARNING] Unstaged files detected.\n[INFO] Stashing unstaged files to /ictstr01/home/aih/franz.srambical/.cache/pre-commit/patch1750804744-3445150.\nblack....................................................................Passed\n[INFO] Restored changes from /ictstr01/home/aih/franz.srambical/.cache/pre-commit/patch1750804744-3445150.\n2025-06-25 00:39:04.927 [info] > git config --get commit.template [7ms]\n2025-06-25 00:39:04.941 [info] > git config --get commit.template [6ms]\n2025-06-25 00:39:04.958 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [16ms]\n2025-06-25 00:39:04.984 [info] > git status -z -uall [17ms]\n2025-06-25 00:39:04.991 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [19ms]\n2025-06-25 00:39:05.007 [info] > git merge-base refs/heads/seeded-episode-sampling refs/remotes/origin/seeded-episode-sampling [8ms]\n2025-06-25 00:39:05.024 [info] > git diff --name-status -z --diff-filter=ADMR 9e8a3860d8270b2dbf00311f48a6f8bf621b1aca...refs/remotes/origin/seeded-episode-sampling [10ms]\n",log,content
201,212895,"vscode.git.Git",31555,0,"2025-06-25 00:39:05.347 [info] > git config --local branch.seeded-episode-sampling.github-pr-owner-number [12ms]\n2025-06-25 00:39:05.347 [warning] [Git][config] git config failed: Failed to execute git\n2025-06-25 00:39:05.348 [info] > git diff --no-color [21ms]\n2025-06-25 00:39:05.356 [info] > git config --get commit.template [9ms]\n2025-06-25 00:39:05.370 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [15ms]\n2025-06-25 00:39:05.397 [info] > git log --format=%H%n%aN%n%aE%n%at%n%ct%n%P%n%D%n%B -z --shortstat --diff-merges=first-parent -n50 --skip=0 --topo-order --decorate=full --stdin [78ms]\n2025-06-25 00:39:05.400 [info] > git status -z -uall [19ms]\n2025-06-25 00:39:05.415 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [23ms]\n",log,content
202,214221,"vscode.git.Git",32516,0,"2025-06-25 00:39:06.641 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/utils/nn.py [10ms]\n2025-06-25 00:39:06.649 [info] > git ls-tree -l HEAD -- /lustre/groups/haicu/workspace/franz.srambical/jafar/utils/nn.py [9ms]\n2025-06-25 00:39:06.797 [info] > git config --get commit.template [10ms]\n2025-06-25 00:39:06.812 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [16ms]\n2025-06-25 00:39:06.837 [info] > git status -z -uall [15ms]\n",log,content
203,214432,"vscode.git.Git",33158,0,"2025-06-25 00:39:06.848 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [20ms]\n2025-06-25 00:39:06.861 [info] > git show --textconv HEAD:utils/nn.py [14ms]\n2025-06-25 00:39:06.894 [info] > git show --textconv HEAD:utils/nn.py [13ms]\n",log,content
204,219260,"vscode.git.Git",33440,0,"2025-06-25 00:39:11.695 [info] > git push origin seeded-episode-sampling:seeded-episode-sampling [1503ms]\n2025-06-25 00:39:11.695 [info] To github.com:p-doom/jafar.git\n 9e8a386..917a739 seeded-episode-sampling -> seeded-episode-sampling\n2025-06-25 00:39:11.719 [info] > git config --get commit.template [12ms]\n2025-06-25 00:39:11.730 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [12ms]\n2025-06-25 00:39:11.750 [info] > git status -z -uall [13ms]\n2025-06-25 00:39:11.770 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [23ms]\n2025-06-25 00:39:11.796 [info] > git merge-base refs/heads/seeded-episode-sampling refs/remotes/origin/seeded-episode-sampling [11ms]\n2025-06-25 00:39:11.817 [info] > git diff --name-status -z --diff-filter=ADMR 917a7397f06657ab25fa0612b8c9f8e00aefbf33...refs/remotes/origin/seeded-episode-sampling [11ms]\n2025-06-25 00:39:11.874 [info] > git config --get commit.template [12ms]\n2025-06-25 00:39:11.882 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) refs/heads/seeded-episode-sampling refs/remotes/seeded-episode-sampling [11ms]\n",log,content
205,219881,"vscode.git.Git",34828,0,"2025-06-25 00:39:11.908 [info] > git status -z -uall [15ms]\n2025-06-25 00:39:11.924 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [22ms]\n2025-06-25 00:39:12.015 [info] > git config --local branch.seeded-episode-sampling.github-pr-owner-number [4ms]\n2025-06-25 00:39:12.015 [warning] [Git][config] git config failed: Failed to execute git\n2025-06-25 00:39:12.016 [info] > git diff --no-color [12ms]\n2025-06-25 00:39:12.083 [info] > git log --format=%H%n%aN%n%aE%n%at%n%ct%n%P%n%D%n%B -z --shortstat --diff-merges=first-parent -n50 --skip=0 --topo-order --decorate=full --stdin [84ms]\n",log,content
206,220315,"utils/nn.py",0,0,"",python,tab
207,221950,"TERMINAL",0,0,"^C",,terminal_command
208,221951,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;4bd28968-9983-490f-a4cb-77f4e120a232]633;C]0;franz.srambical@hpc-submit02:/lustre/groups/haicu/workspace/franz.srambical/jafar]633;D",,terminal_output
209,222128,"TERMINAL",0,0,"^C",,terminal_command
210,222129,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;4bd28968-9983-490f-a4cb-77f4e120a232]633;C]0;franz.srambical@hpc-submit02:/lustre/groups/haicu/workspace/franz.srambical/jafar]633;D",,terminal_output
211,223240,"TERMINAL",0,0,"squeue -w supergpu16,supergpu18,gpusrv[69,70],supergpu14",,terminal_command
212,223241,"TERMINAL",0,0,"\r\n[?2004l\r]633;E;squeue -w supergpu16,supergpu18,gpusrv[69,70],supergpu14;4bd28968-9983-490f-a4cb-77f4e120a232]633;C",,terminal_output
213,273141,"utils/nn.py",1484,0,"",python,selection_mouse
214,658929,"TERMINAL",0,0,"slurm_load_node error: Unable to contact slurm controller (connect failure)\r\n]0;franz.srambical@hpc-submit02:/lustre/groups/haicu/workspace/franz.srambical/jafar]633;D;1",,terminal_output
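The utils/nn.py snapshot recorded above centers on the VectorQuantizer: inputs and codebook rows are L2-normalized, codes are picked by negative cosine similarity, and gradients flow through a straight-through estimator. A minimal, self-contained sketch of that lookup, using only what the recording shows and hypothetical shapes:

import jax
import jax.numpy as jnp

def normalize(x):
    # L2-normalize along the last axis, as in the recorded utils/nn.py
    return x / (jnp.linalg.norm(x, ord=2, axis=-1, keepdims=True) + 1e-8)

def vq_lookup(x, codebook):
    # Nearest-code lookup; negative cosine similarity serves as the distance
    x = normalize(x)
    codebook = normalize(codebook)
    distance = -jnp.matmul(x, codebook.T)
    indices = jnp.argmin(distance, axis=-1)
    z = codebook[indices]
    # Straight-through estimator: forward pass uses z,
    # backward pass routes gradients to x unchanged
    z_q = x + jax.lax.stop_gradient(z - x)
    return z_q, indices

x = jax.random.normal(jax.random.key(0), (4, 32))          # hypothetical embeddings
codebook = jax.random.normal(jax.random.key(1), (16, 32))  # 16 codes of dim 32
z_q, idx = vq_lookup(x, codebook)
print(z_q.shape, idx.shape)  # (4, 32) (4,)

The edit captured at sequence 193 only reflows the __call__ signature of this class; the git log that follows shows black passing in the pre-commit hook, then the commit being pushed to the seeded-episode-sampling branch.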
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-0f7f1a2c-8092-4a29-81e5-3d0f406b88711751465682686-2025_07_02-16.15.28.930/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-207fe3e3-7fd2-432d-a410-a7a943195e5f1753557295596-2025_07_26-21.15.03.812/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-24737a5e-b7f6-491e-94c6-0c20304cd1e41754227167268-2025_08_03-15.19.34.553/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-372ef62d-2075-43ff-ac1a-e2025fd873c41751612450082-2025_07_04-09.01.47.125/source.csv
ADDED
@@ -0,0 +1,47 @@
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
1,30,"tests/test_checkpointer.py",0,0,"import unittest\nimport tempfile\nimport os\nimport jax\nimport jax.numpy as jnp\nfrom flax.training import orbax_utils\nfrom orbax.checkpoint import PyTreeCheckpointer\nfrom pathlib import Path\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom flax.training.train_state import TrainState\nimport optax\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\n\nclass DistributedCheckpointerTest(unittest.TestCase):\n def setUp(self):\n super().setUp()\n self._temp_dir_manager = tempfile.TemporaryDirectory()\n self.checkpoint_dir = Path(self._temp_dir_manager.name)\n self.addCleanup(self._temp_dir_manager.cleanup)\n\n # FIXME (f.srambical): If the tests pass, we should use the default model config instead\n self.model_kwargs = dict(\n in_dim=3,\n model_dim=8,\n latent_dim=4,\n num_latents=16,\n patch_size=2,\n num_blocks=1,\n num_heads=1,\n dropout=0.0,\n codebook_dropout=0.0,\n )\n self.image_shape = (8, 8, 3)\n self.seq_len = 2\n self.batch_size = 2\n self.seed = 0\n\n def test_distributed_checkpointing(self):\n jax.distributed.initialize()\n num_devices = jax.device_count()\n self.assertGreater(num_devices, 0)\n\n model = TokenizerVQVAE(**self.model_kwargs)\n rng = jax.random.PRNGKey(self.seed)\n dummy_inputs = dict(\n videos=jnp.zeros((self.batch_size, self.seq_len, *self.image_shape), dtype=jnp.float32)\n )\n params = model.init(rng, dummy_inputs)\n\n tx = optax.adam(1e-3)\n state = TrainState.create(apply_fn=model.apply, params=params, tx=tx)\n\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n state = jax.device_put(state, replicated_sharding)\n\n ckpt = {""model"": state}\n orbax_checkpointer = PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n ckpt_path = str(self.checkpoint_dir / ""test_ckpt"")\n orbax_checkpointer.save(ckpt_path, ckpt, save_args=save_args)\n self.assertTrue(os.path.exists(ckpt_path))\n\n restore_target = {""model"": state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n restored = orbax_checkpointer.restore(ckpt_path, item=restore_target, restore_args=restore_args)\n # Compare parameters recursively, handling nested structure\n def compare_params(original, restored):\n if isinstance(original, dict):\n for k in original.keys():\n compare_params(original[k], restored[k])\n else:\n self.assertTrue(jnp.allclose(original, restored))\n \n compare_params(state.params, restored[""model""].params)\n\nif __name__ == ""__main__"":\n unittest.main()\n",python,tab
2,816,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"9:01:45 AM [info] Activating crowd-code\n9:01:47 AM [info] Recording started\n9:01:47 AM [info] Initializing git provider using file system watchers...\n",Log,tab
3,1444,"extension-output-pdoom-org.crowd-code-#1-crowd-code",150,0,"9:01:47 AM [info] Git repository found\n9:01:47 AM [info] Git provider initialized successfully\n9:01:47 AM [info] Initial git state: [object Object]\n",Log,content
4,10577,"TERMINAL",0,0,"/usr/bin/python3 /ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt",,terminal_command
5,10616,"TERMINAL",0,0,"]633;E;/usr/bin/python3 /ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt;0208a1bb-6063-4ca7-88cc-59ff15d627ad]633;C",,terminal_output
6,10697,"TERMINAL",0,0,"]0;franz.srambical@hpc-submit01:/ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash]633;D;0",,terminal_output
7,27626,"TERMINAL",0,0,"salloc --reservation=haicu_stefan -p gpu_p --time=05:00:00 --job-name=interactive_bash --qos=gpu_normal --gres=gpu:2 -w gpusrv69 --cpus-per-task=1 --ntasks-per-node=16",,terminal_command
8,27705,"TERMINAL",0,0,"]633;E;salloc --reservation=haicu_stefan -p gpu_p --time=05:00:00 --job-name=interactive_bash --qos=gpu_normal --gres=gpu:2 -w gpusrv69 --cpus-per-task=1 --ntasks-per-node=16;d83d794d-068d-45a7-86c8-da2446d84194]633;Csalloc: Pending job allocation 26666312\r\nsalloc: job 26666312 queued and waiting for resources\r\n",,terminal_output
9,33297,"TERMINAL",0,0,"bash",,terminal_focus
10,35170,"TERMINAL",0,0,"idle",,terminal_command
11,35173,"TERMINAL",0,0,"\r\n[?2004l\r]633;E;idle;a8707c05-ae9b-4a50-91c9-fa9c06501dad]633;Cbash: idle: command not found\r\n]0;franz.srambical@hpc-submit01:/lustre/groups/haicu/workspace/franz.srambical/jafar]633;D;127",,terminal_output
12,41878,"TERMINAL",0,0,"squeue -w supergpu16,supergpu18,gpusrv[69,70],supergpu14",,terminal_command
13,41936,"TERMINAL",0,0,"[?25l[?2004l\r]633;E;squeue -w supergpu16,supergpu18,gpusrv[69,70],supergpu14;a8707c05-ae9b-4a50-91c9-fa9c06501dad]633;C[?25h",,terminal_output
14,41937,"TERMINAL",0,0," JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON)\r\n 26649778 gpu_p test_kto muhammad R 15:15:33 1 supergpu14\r\n 26644304 gpu_p old_gpt helena.f R 14:08:26 1 supergpu14\r\n",,terminal_output
15,44976,"TERMINAL",0,0,"salloc",,terminal_focus
16,50016,"TERMINAL",0,0,"bash",,terminal_focus
17,58348,"TERMINAL",0,0,"",,terminal_focus
18,58857,"TERMINAL",0,0,"",,terminal_focus
19,60096,"TERMINAL",0,0,"bash",,terminal_focus
20,60736,"TERMINAL",0,0,"salloc",,terminal_focus
21,63016,"TERMINAL",0,0,"^Csalloc: Job allocation 26666312 has been revoked.\r\nsalloc: Job aborted due to signal\r\n",,terminal_output
22,67417,"TERMINAL",0,0,"salloc --reservation=haicu_stefan -p gpu_p --time=05:00:00 --job-name=interactive_bash --qos=gpu_normal --gres=gpu:2 -w gpusrv70 --cpus-per-task=1 --ntasks-per-node=16",,terminal_command
23,67496,"TERMINAL",0,0,"]633;E;salloc --reservation=haicu_stefan -p gpu_p --time=05:00:00 --job-name=interactive_bash --qos=gpu_normal --gres=gpu:2 -w gpusrv70 --cpus-per-task=1 --ntasks-per-node=16;d83d794d-068d-45a7-86c8-da2446d84194]633;Csalloc: Pending job allocation 26666314\r\nsalloc: job 26666314 queued and waiting for resources\r\n",,terminal_output
24,78434,"TERMINAL",0,0,"^Csalloc: Job allocation 26666314 has been revoked.\r\nsalloc: Job aborted due to signal\r\n]0;franz.srambical@hpc-submit01:/lustre/groups/haicu/workspace/franz.srambical/jafar]633;D;1]633;P;Cwd=/lustre/groups/haicu/workspace/franz.srambical/jafar[?2004h",,terminal_output
25,88537,"TERMINAL",0,0,"salloc --reservation=haicu_stefan -p gpu_p --time=05:00:00 --job-name=interactive_bash --qos=gpu_normal --gres=gpu:2 -w gpusrv69,gpusrv70 --cpus-per-task=1 --ntasks-per-node=1",,terminal_command
26,88615,"TERMINAL",0,0,"\r\n[?2004l\r]633;E;salloc --reservation=haicu_stefan -p gpu_p --time=05:00:00 --job-name=interactive_bash --qos=gpu_normal --gres=gpu:2 -w gpusrv69,gpusrv70 --cpus-per-task=1 --ntasks-per-node=1;d83d794d-068d-45a7-86c8-da2446d84194]633;Csalloc: Granted job allocation 26666315\r\n",,terminal_output
27,88708,"TERMINAL",0,0,"salloc: Nodes gpusrv[69-70] are ready for job\r\n",,terminal_output
28,89048,"TERMINAL",0,0,"]0;franz.srambical@hpc-submit01:/lustre/groups/haicu/workspace/franz.srambical/jafar[?2004h[franz.srambical@gpusrv69 jafar]$ ",,terminal_output
29,97467,"TERMINAL",0,0,"\r(reverse-i-search)`': [K",,terminal_output
30,97592,"TERMINAL",0,0,"s': python -m unittest tests.te[7ms[27mt_checkpointer -v",,terminal_output
31,97669,"TERMINAL",0,0,"[?25l[58;52H[0mt[58;52H[58;51H[7;39;49ms[58;51H[0m\r[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[Co': . ""/ictstr01/home/aih/franz.srambical/.cur[7mso[27mr-server/bin/5b19bac7a947f54e4caa3eb7e4c5fbf832389850/out/vs/workbench/contrib/terminal/common/scripts/shellIntegration-bash.sh""[A[A[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[?25h",,terminal_output
32,97736,"TERMINAL",0,0,"\r[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[24Pu': [7msou[27mrce .venv/bin/activate\r\n\r[K\r\n\r[K[A[A[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[1@r': [7msour[27m",,terminal_output
33,97908,"TERMINAL",0,0,"[?25l[56;28H[7;39;49mo[56;28H[56;27H[7;39;49ms[56;27H[0m[1@c': [7msourc[27m[?25h[1@e': [7msource[27m",,terminal_output
34,98276,"TERMINAL",0,0,"[?25l[56;29H\r[6@[franz.srambical@gpusrv69 jafar]$ source\r\n[?2004l\r]0;franz.srambical@hpc-submit01:/lustre/groups/haicu/workspace/franz.srambical/jafar[?2004h(jafar) [franz.srambical@gpusrv69 jafar]$ [?25h",,terminal_output
35,98428,"TERMINAL",0,0,"\r(reverse-i-search)`': [K",,terminal_output
36,99288,"TERMINAL",0,0,"u': so[7mu[27mrce .venv/bin/activaten': python -m [7mun[27mittest tests.test_checkpointer -v",,terminal_output
37,99407,"TERMINAL",0,0,"[?25l[57;36H[7;39;49mn[57;36H[57;35H[7;39;49mu[57;35H[0m[1@i': python -m [7muni[27m[?25h",,terminal_output
38,99596,"TERMINAL",0,0,"[1@t': python -m [7munit[27m",,terminal_output
39,99728,"TERMINAL",0,0,"[?25l[57;37H[7;39;49mu[57;37H[0m[1@t': python -m [7munitt[27m[?25h",,terminal_output
40,100124,"TERMINAL",0,0,"\r[Cjafar) [franz.srambical@gpusrv69 jafar]$ python -m unittest tests.test_checkpointer -v[A[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C\r\n\r[C[C[C[C[C[C[C[C[C[C[C[C[C",,terminal_output
41,100566,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output
42,132885,"tests/test_checkpointer.py",0,0,"",python,tab
43,166211,"TERMINAL",0,0,"test_distributed_checkpointing (tests.test_checkpointer.DistributedCheckpointerTest) ... ",,terminal_output
44,169265,"TERMINAL",0,0,"bash",,terminal_focus
45,472087,"TERMINAL",0,0,"2025-07-04 09:09:39.070277: F external/xla/xla/pjrt/distributed/client.h:80] Terminating process because the JAX distributed service detected fatal errors. This most likely indicates that another task died; see the other task logs for more details. Disable Python buffering, i.e. `python -u`, to be sure to see all the previous output. absl::Status: DEADLINE_EXCEEDED: Deadline Exceeded\r\n\r\nRPC: /tensorflow.CoordinationService/RegisterTask\r\n",,terminal_output
46,472174,"TERMINAL",0,0,"Aborted (core dumped)\r\n]0;franz.srambical@hpc-submit01:/lustre/groups/haicu/workspace/franz.srambical/jafar[?2004h(jafar) [franz.srambical@gpusrv69 jafar]$ ",,terminal_output
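The session above runs tests.test_checkpointer on a two-node gpusrv69/gpusrv70 allocation; the crash is in jax.distributed.initialize() (the coordination service hit DEADLINE_EXCEEDED, apparently because a peer task never registered), not in the checkpointing logic itself. The orbax round-trip at the core of the test, reduced to a single-process sketch over a stand-in pytree rather than the full TrainState:

import tempfile
import jax.numpy as jnp
from flax.training import orbax_utils
from orbax.checkpoint import PyTreeCheckpointer

# A stand-in pytree; the recorded test checkpoints a full TokenizerVQVAE TrainState
ckpt = {"model": {"w": jnp.ones((4, 4)), "b": jnp.zeros(4)}}

checkpointer = PyTreeCheckpointer()
save_args = orbax_utils.save_args_from_target(ckpt)
ckpt_path = tempfile.mkdtemp() + "/test_ckpt"  # hypothetical location
checkpointer.save(ckpt_path, ckpt, save_args=save_args)

restored = checkpointer.restore(ckpt_path)
assert jnp.allclose(ckpt["model"]["w"], restored["model"]["w"])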
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-3e3ce02e-664a-4f58-9d7f-0f56e32c7def1753363875204-2025_07_24-15.31.23.202/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-46148775-4197-4774-bf77-8631ca6b73f01753557591807-2025_07_26-21.19.58.968/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-5d37fb4d-73be-43f4-bdda-1c3c7db3bdf31752589529764-2025_07_15-16.25.37.55/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-659ce403-f41d-4217-a14b-978086650bc21753384200795-2025_07_24-21.10.09.978/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-6da126cd-f641-4a15-bc10-f51c6b432fda1753788630792-2025_07_29-13.30.37.18/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-7879e034-f897-48e8-8481-1a87a73b0dc81752135543307-2025_07_10-10.19.09.565/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-803c0497-2cfb-4591-a2cb-bac49e1c774c1751564777714-2025_07_03-19.47.11.372/source.csv
ADDED
@@ -0,0 +1,141 @@
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
1,4,"experiments/tokenizer_cross_node_checkpointing_test.sh",0,0,"#!/usr/bin/env bash\nsource .venv/bin/activate\n\n\ndata_dir='data_tfrecords'\n\nsrun python train_tokenizer.py \\n --batch_size 12 \\n --ckpt_dir checkpoints/tokenizer_cross_node_checkpointing_test \\n --log_checkpoint_interval 10 \\n --num_steps 300000 \\n --warmup_steps 10000 \\n --seed 0 \\n --min_lr=0.0000866 \\n --max_lr=0.0000866 \\n --data_dir $data_dir",shellscript,tab
2,1977,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"7:47:09 PM [info] Activating crowd-code\n7:47:11 PM [info] Recording started\n7:47:11 PM [info] Initializing git provider using file system watchers...\n",Log,tab
3,2179,"test.sh",0,0,"",shellscript,tab
4,3133,"test.sh",0,0," \n#!/bin/bash\n#SBATCH --nodes=1\n#SBATCH --ntasks-per-node=4\n#SBATCH --gres=gpu:4\n#SBATCH --cpus-per-task=2\n#SBATCH --time=00:01:00\n#SBATCH --job-name=gpu_check\n#SBATCH --output=gpu_check.out\n\n# This command will be run for each of the 4 tasks\n# Each task will print its unique environment\nsrun bash -c 'echo ""Host: $(hostname), Slurm Task ID: $SLURM_TASK_PID, Local Rank: $SLURM_LOCALID, CUDA_VISIBLE_DEVICES: $CUDA_VISIBLE_DEVICES""'\n\n ",shellscript,content
5,3557,"test.sh",443,0,"",shellscript,selection_command
6,8718,"test.sh",5,0,"",shellscript,selection_command
7,8723,"test.sh",0,7,"",shellscript,content
8,8726,"test.sh",12,0,"",shellscript,selection_command
9,8727,"test.sh",30,0,"",shellscript,selection_command
10,8728,"test.sh",58,0,"",shellscript,selection_command
11,8729,"test.sh",79,0,"",shellscript,selection_command
12,8730,"test.sh",105,0,"",shellscript,selection_command
13,8730,"test.sh",129,0,"",shellscript,selection_command
14,8731,"test.sh",158,0,"",shellscript,selection_command
15,8731,"test.sh",189,0,"",shellscript,selection_command
16,8732,"test.sh",190,0,"",shellscript,selection_command
17,8732,"test.sh",241,0,"",shellscript,selection_command
18,8733,"test.sh",287,0,"",shellscript,selection_command
19,8733,"test.sh",432,0,"",shellscript,selection_command
20,8733,"test.sh",433,0,"",shellscript,selection_command
21,8753,"test.sh",432,5,"",shellscript,content
22,8755,"test.sh",431,1,"",shellscript,content
23,8756,"test.sh",287,0,"",shellscript,selection_command
24,10256,"TERMINAL",0,0,"/usr/bin/python3 /ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt",,terminal_command
25,10301,"TERMINAL",0,0,"]633;E;/usr/bin/python3 /ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt;71118bed-1073-4876-b758-445b77add838]633;C",,terminal_output
26,10427,"TERMINAL",0,0,"]0;franz.srambical@hpc-submit01:/ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash]633;D;0",,terminal_output
27,13057,"test.sh",241,0,"",shellscript,selection_command
28,13332,"test.sh",190,0,"",shellscript,selection_command
29,13341,"test.sh",189,0,"",shellscript,selection_command
30,13905,"test.sh",190,0,"",shellscript,selection_command
31,14077,"test.sh",241,0,"",shellscript,selection_command
32,14361,"test.sh",241,45,"# Each task will print its unique environment",shellscript,selection_command
33,16057,"test.sh",12,275,"",shellscript,content
34,31353,"test.sh",81,0,"",shellscript,selection_mouse
35,32511,"test.sh",67,0,"",shellscript,selection_command
36,32722,"test.sh",66,0,"",shellscript,selection_command
37,32784,"test.sh",64,0,"",shellscript,selection_command
38,32795,"test.sh",62,0,"",shellscript,selection_command
39,32807,"test.sh",57,0,"",shellscript,selection_command
40,32842,"test.sh",51,0,"",shellscript,selection_command
41,32877,"test.sh",48,0,"",shellscript,selection_command
42,32912,"test.sh",40,0,"",shellscript,selection_command
43,32972,"test.sh",38,0,"",shellscript,selection_command
44,33129,"test.sh",36,0,"",shellscript,selection_command
45,33256,"test.sh",32,0,"",shellscript,selection_command
46,34207,"test.sh",32,1,"H",shellscript,selection_command
47,34239,"test.sh",32,4,"Host",shellscript,selection_command
48,34535,"test.sh",32,5,"Host:",shellscript,selection_command
49,34556,"test.sh",32,8,"Host: $(",shellscript,selection_command
50,34561,"test.sh",32,16,"Host: $(hostname",shellscript,selection_command
51,34590,"test.sh",32,18,"Host: $(hostname),",shellscript,selection_command
52,34642,"test.sh",32,24,"Host: $(hostname), Slurm",shellscript,selection_command
53,34702,"test.sh",32,29,"Host: $(hostname), Slurm Task",shellscript,selection_command
54,34716,"test.sh",32,32,"Host: $(hostname), Slurm Task ID",shellscript,selection_command
55,34732,"test.sh",32,33,"Host: $(hostname), Slurm Task ID:",shellscript,selection_command
56,34761,"test.sh",32,35,"Host: $(hostname), Slurm Task ID: $",shellscript,selection_command
57,34806,"test.sh",32,49,"Host: $(hostname), Slurm Task ID: $SLURM_TASK_PID",shellscript,selection_command
58,34827,"test.sh",32,50,"Host: $(hostname), Slurm Task ID: $SLURM_TASK_PID,",shellscript,selection_command
59,34859,"test.sh",32,56,"Host: $(hostname), Slurm Task ID: $SLURM_TASK_PID, Local",shellscript,selection_command
60,34911,"test.sh",32,61,"Host: $(hostname), Slurm Task ID: $SLURM_TASK_PID, Local Rank",shellscript,selection_command
61,34966,"test.sh",32,62,"Host: $(hostname), Slurm Task ID: $SLURM_TASK_PID, Local Rank:",shellscript,selection_command
62,34968,"test.sh",32,64,"Host: $(hostname), Slurm Task ID: $SLURM_TASK_PID, Local Rank: $",shellscript,selection_command
63,35017,"test.sh",32,77,"Host: $(hostname), Slurm Task ID: $SLURM_TASK_PID, Local Rank: $SLURM_LOCALID",shellscript,selection_command
64,35045,"test.sh",32,78,"Host: $(hostname), Slurm Task ID: $SLURM_TASK_PID, Local Rank: $SLURM_LOCALID,",shellscript,selection_command
65,35094,"test.sh",32,99,"Host: $(hostname), Slurm Task ID: $SLURM_TASK_PID, Local Rank: $SLURM_LOCALID, CUDA_VISIBLE_DEVICES",shellscript,selection_command
66,35124,"test.sh",32,100,"Host: $(hostname), Slurm Task ID: $SLURM_TASK_PID, Local Rank: $SLURM_LOCALID, CUDA_VISIBLE_DEVICES:",shellscript,selection_command
67,35144,"test.sh",32,102,"Host: $(hostname), Slurm Task ID: $SLURM_TASK_PID, Local Rank: $SLURM_LOCALID, CUDA_VISIBLE_DEVICES: $",shellscript,selection_command
68,35450,"test.sh",32,122,"Host: $(hostname), Slurm Task ID: $SLURM_TASK_PID, Local Rank: $SLURM_LOCALID, CUDA_VISIBLE_DEVICES: $CUDA_VISIBLE_DEVICES",shellscript,selection_command
69,36051,"test.sh",32,103,"Host: $(hostname), Slurm Task ID: $SLURM_TASK_PID, Local Rank: $SLURM_LOCALID, CUDA_VISIBLE_DEVICES: $C",shellscript,selection_command
70,36177,"test.sh",32,102,"Host: $(hostname), Slurm Task ID: $SLURM_TASK_PID, Local Rank: $SLURM_LOCALID, CUDA_VISIBLE_DEVICES: $",shellscript,selection_command
71,36310,"test.sh",32,100,"Host: $(hostname), Slurm Task ID: $SLURM_TASK_PID, Local Rank: $SLURM_LOCALID, CUDA_VISIBLE_DEVICES:",shellscript,selection_command
72,36493,"test.sh",32,80,"Host: $(hostname), Slurm Task ID: $SLURM_TASK_PID, Local Rank: $SLURM_LOCALID, C",shellscript,selection_command
73,36656,"test.sh",32,78,"Host: $(hostname), Slurm Task ID: $SLURM_TASK_PID, Local Rank: $SLURM_LOCALID,",shellscript,selection_command
74,36970,"test.sh",32,79,"Host: $(hostname), Slurm Task ID: $SLURM_TASK_PID, Local Rank: $SLURM_LOCALID, ",shellscript,selection_command
75,37157,"test.sh",32,79,"",shellscript,content
76,38104,"test.sh",12,0,"",shellscript,selection_command
77,50152,"test.sh",48,0,"",shellscript,selection_mouse
78,50949,"test.sh",12,0,"",shellscript,selection_command
79,51060,"test.sh",17,0,"",shellscript,selection_command
80,51236,"test.sh",22,0,"",shellscript,selection_command
81,51527,"test.sh",17,0,"",shellscript,selection_command
82,51669,"test.sh",17,1,"b",shellscript,selection_command
83,51750,"test.sh",17,4,"bash",shellscript,selection_command
84,51941,"test.sh",17,6,"bash -",shellscript,selection_command
85,52281,"test.sh",17,7,"bash -c",shellscript,selection_command
86,52441,"test.sh",17,8,"bash -c ",shellscript,selection_command
87,52670,"test.sh",17,8,"",shellscript,content
88,55817,"test.sh",17,0,"bash -c ",shellscript,content
89,55838,"test.sh",17,0,"",shellscript,selection_command
90,86927,"test.sh",22,0,"",shellscript,selection_command
91,87266,"test.sh",23,0,"",shellscript,selection_command
92,87646,"test.sh",25,0,"",shellscript,selection_command
93,88006,"test.sh",23,0,"",shellscript,selection_command
94,88146,"test.sh",22,0,"",shellscript,selection_command
95,89947,"test.sh",23,0,"",shellscript,selection_command
96,90526,"test.sh",22,0,"",shellscript,selection_command
97,90935,"test.sh",17,0,"",shellscript,selection_command
98,92047,"test.sh",22,0,"",shellscript,selection_command
99,96868,"test.sh",17,0,"",shellscript,selection_command
100,97256,"test.sh",17,1,"b",shellscript,selection_command
101,97272,"test.sh",17,4,"bash",shellscript,selection_command
102,97487,"test.sh",17,6,"bash -",shellscript,selection_command
103,97747,"test.sh",17,7,"bash -c",shellscript,selection_command
104,97906,"test.sh",17,8,"bash -c ",shellscript,selection_command
105,98087,"test.sh",17,9,"bash -c '",shellscript,selection_command
106,98304,"test.sh",17,9,"",shellscript,content
107,99047,"test.sh",22,0,"",shellscript,selection_command
108,99847,"test.sh",22,1,"""",shellscript,selection_command
109,99907,"test.sh",22,21,"""CUDA_VISIBLE_DEVICES",shellscript,selection_command
110,100147,"test.sh",22,22,"""CUDA_VISIBLE_DEVICES:",shellscript,selection_command
111,100247,"test.sh",22,24,"""CUDA_VISIBLE_DEVICES: $",shellscript,selection_command
112,100727,"test.sh",22,23,"""CUDA_VISIBLE_DEVICES: ",shellscript,selection_command
113,100885,"test.sh",22,23,"",shellscript,content
114,101116,"test.sh",45,0,"",shellscript,selection_command
115,101296,"test.sh",44,1,"",shellscript,content
116,101447,"test.sh",43,0,"",shellscript,selection_command
117,102147,"test.sh",44,0,"",shellscript,selection_command
118,102265,"test.sh",43,1,"",shellscript,content
119,102626,"test.sh",42,0,"",shellscript,selection_command
120,103206,"test.sh",23,0,"",shellscript,selection_command
121,113916,"test.sh",43,0,"",shellscript,selection_mouse
122,113919,"test.sh",42,0,"",shellscript,selection_command
123,119517,"test.sh",11,32,"\nsrun 'echo ""CUDA_VISIBLE_DEVICES: $CUDA_VISIBLE_DEVICES""'",shellscript,content
124,119525,"experiments/tokenizer_cross_node_checkpointing_test.sh",0,0,"",shellscript,tab
125,146607,"TERMINAL",0,0,"salloc --reservation=haicu_stefan -p gpu_p --time=05:00:00 --job-name=interactive_bash --qos=gpu_normal --gres=gpu:2 -w gpusrv69,gpusrv70 --cpus-per-task=8 --ntasks-per-node=2",,terminal_command
126,146718,"TERMINAL",0,0,"]633;E;salloc --reservation=haicu_stefan -p gpu_p --time=05:00:00 --job-name=interactive_bash --qos=gpu_normal --gres=gpu:2 -w gpusrv69,gpusrv70 --cpus-per-task=8 --ntasks-per-node=2;d193c799-eb50-4b87-89db-ea172d98a654]633;Csalloc: error: QOSMaxCpuPerJobLimit\r\nsalloc: error: Job submit/allocate failed: Job violates accounting/QOS policy (job submit limit, user's size and/or time limits)\r\n]0;franz.srambical@hpc-submit01:/lustre/groups/haicu/workspace/franz.srambical/jafar]633;D;1]633;P;Cwd=/lustre/groups/haicu/workspace/franz.srambical/jafar",,terminal_output
127,151716,"TERMINAL",0,0,"salloc --reservation=haicu_stefan -p gpu_p --time=05:00:00 --job-name=interactive_bash --qos=gpu_normal --gres=gpu:2 -w gpusrv69,gpusrv70 --cpus-per-task=4 --ntasks-per-node=2",,terminal_command
128,151775,"TERMINAL",0,0,"]633;E;salloc --reservation=haicu_stefan -p gpu_p --time=05:00:00 --job-name=interactive_bash --qos=gpu_normal --gres=gpu:2 -w gpusrv69,gpusrv70 --cpus-per-task=4 --ntasks-per-node=2;d193c799-eb50-4b87-89db-ea172d98a654]633;Csalloc: Granted job allocation 26664557\r\n",,terminal_output
129,151886,"TERMINAL",0,0,"salloc: Waiting for resource configuration\r\n",,terminal_output
130,152876,"TERMINAL",0,0,"salloc: Nodes gpusrv[69-70] are ready for job\r\n",,terminal_output
131,153248,"TERMINAL",0,0,"]0;franz.srambical@hpc-submit01:/lustre/groups/haicu/workspace/franz.srambical/jafar[?2004h[franz.srambical@gpusrv69 jafar]$ ",,terminal_output
132,155906,"TERMINAL",0,0,"[7msrun echo $CUDA_VISIBLE_DEVICES[27m",,terminal_output
133,156146,"TERMINAL",0,0,"[?25l[24;66Hsrun echo $CUDA_VISIBLE_DEVICES\r\n[?2004l\r[?25h",,terminal_output
134,156334,"TERMINAL",0,0,"0,1\r\n0,1\r\n0,1\r\n0,1\r\n",,terminal_output
135,156487,"TERMINAL",0,0,"]0;franz.srambical@hpc-submit01:/lustre/groups/haicu/workspace/franz.srambical/jafar[?2004h[franz.srambical@gpusrv69 jafar]$ ",,terminal_output
136,701519,"TERMINAL",0,0,"[?25li[2md[22m[29;37H[?25h",,terminal_output
137,701599,"TERMINAL",0,0,"[?25l[29;36Hd[29;37H[?25h",,terminal_output
138,701717,"TERMINAL",0,0,"[?25l[29;37Hl[29;39H[?25h",,terminal_output
139,701795,"TERMINAL",0,0,"[?25l[29;38He[29;39H[?25h",,terminal_output
140,702080,"TERMINAL",0,0,"[?25l[?2004l\r[?25hbash: idle: command not found\r\n]0;franz.srambical@hpc-submit01:/lustre/groups/haicu/workspace/franz.srambical/jafar[?2004h[franz.srambical@gpusrv69 jafar]$ ",,terminal_output
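This session is mostly allocation plumbing: the first salloc is rejected with QOSMaxCpuPerJobLimit, the retry with --cpus-per-task=4 is granted on gpusrv[69-70], and srun echo $CUDA_VISIBLE_DEVICES prints 0,1 once per task. On the JAX side, jax.distributed.initialize() can be driven from the same SLURM environment; a hedged sketch with hypothetical fallbacks for a single local process:

import os
import jax

# Inside an srun task, SLURM exports these variables;
# the defaults here are hypothetical single-process stand-ins.
coordinator = os.environ.get("SLURM_LAUNCH_NODE_IPADDR", "127.0.0.1")
num_processes = int(os.environ.get("SLURM_NTASKS", "1"))
process_id = int(os.environ.get("SLURM_PROCID", "0"))

jax.distributed.initialize(
    coordinator_address=f"{coordinator}:1234",  # the port is an arbitrary choice
    num_processes=num_processes,
    process_id=process_id,
)
print(f"process {jax.process_index()}: "
      f"{jax.local_device_count()} local / {jax.device_count()} total devices")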
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-90a7d2df-8655-400e-8729-5616e02268171751547517319-2025_07_03-14.58.47.421/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-9899324d-b79d-45d0-8d10-efdb2a606e141753968905455-2025_07_31-15.35.12.899/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-9e1e9a37-9c93-49f5-86bb-957e61f072951753546714480-2025_07_26-18.18.42.334/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-a06af857-5252-41ea-b944-6fb276580a331751465854969-2025_07_02-16.18.54.752/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-a5280753-495e-4391-8448-c3c5679e94b01753346266343-2025_07_24-10.37.53.687/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-ba0d66ec-6cdf-4899-b3c6-5e02420596081753426538563-2025_07_25-08.55.50.825/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-c0810100-7d8e-4105-bf12-84868f66800c1753216757462-2025_07_22-22.39.25.825/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-ccd47d51-aa58-4f87-bfea-141fbcfe923f1754059235262-2025_08_01-16.40.40.908/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-f2478f34-0247-46ae-b2bf-367520171d271754324711637-2025_08_04-18.25.17.542/source.csv
ADDED
@@ -0,0 +1,4 @@
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
2,228,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"6:25:17 PM [info] Activating crowd-code\n6:25:17 PM [info] Recording started\n6:25:17 PM [info] Initializing git provider using file system watchers...\n6:25:17 PM [info] Git repository found\n6:25:17 PM [info] Git provider initialized successfully\n",Log,tab
3,437,"extension-output-pdoom-org.crowd-code-#1-crowd-code",245,0,"6:25:17 PM [info] Initial git state: [object Object]\n",Log,content
4,283968,"train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\nfrom typing import cast\n\nimport einops\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\nimport flax.nnx as nnx\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n warmup_steps: int = 5000\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n # Tokenizer\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n use_gt_actions: bool = False\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(\n model: Genie, inputs: dict\n) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n """"""Compute masked dynamics loss""""""\n gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n model.train()\n outputs = model(inputs, training=True)\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n gt = gt.clip(0, 1).reshape(-1, *gt.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = jnp.asarray(pix.psnr(gt, recon)).mean()\n ssim = jnp.asarray(pix.ssim(gt, recon)).mean()\n _, index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]), size=args.num_patch_latents, fill_value=0\n )\n if args.use_gt_actions:\n _, index_counts_lam = 
jnp.unique_counts(\n jnp.ravel(outputs[""lam_indices""]), size=args.num_actions, fill_value=0\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n else:\n codebook_usage_lam = None\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\[email protected]\ndef train_step(\n model: Genie, optimizer: nnx.Optimizer, inputs: dict\n) -> tuple[jax.Array, jax.Array, dict]:\n """"""Update state and compute metrics""""""\n\n def loss_fn(model: Genie) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n return dynamics_loss_fn(model, inputs)\n\n (loss, (recon, metrics)), grads = nnx.value_and_grad(loss_fn, has_aux=True)(model)\n optimizer.update(grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.key(args.seed)\n\n # --- Initialize model ---\n rng, _rng = jax.random.split(rng)\n rngs = nnx.Rngs(_rng)\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_actions=args.num_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n use_gt_actions=args.use_gt_actions,\n rngs=rngs,\n )\n\n _, params, _ = nnx.split(genie, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n 
args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n optimizer = nnx.Optimizer(genie, tx)\n del genie\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n\n model_state = nnx.state(optimizer.model)\n model_sharded_state = jax.lax.with_sharding_constraint(\n model_state, replicated_sharding\n )\n nnx.update(optimizer.model, model_sharded_state)\n optimizer_state = nnx.state(optimizer, nnx.optimizer.OptState)\n optimizer_sharded_state = jax.lax.with_sharding_constraint(\n optimizer_state, replicated_sharding\n )\n nnx.update(optimizer, optimizer_sharded_state)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n image_shape = (args.image_height, args.image_width, args.image_channels)\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n use_gt_actions=args.use_gt_actions,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator), # type: ignore\n ),\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n optimizer = 
restore_genie_components(optimizer, replicated_sharding, rng, args)\n # NOTE: We have to remove the (unused) tokenizer vq dropout due flax.nnx lazily initializing modules.\n # Specifically, the first dynamics model checkpoint will contain the vq dropout module,\n # but the first full restore will fail due to nnx not initializing the module when\n # dropout is set to 0.0.\n del optimizer.model.tokenizer.vq.drop\n\n # --- TRAIN LOOP ---\n dataloader = (\n jax.make_array_from_process_local_data(videos_sharding, elem)\n for elem in grain_iterator\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos, actions in dataloader:\n # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, actions=actions, mask_rng=_rng_mask)\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n optimizer_state = nnx.state(optimizer)\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n grain_iterator # type: ignore\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab
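The train_dynamics.py snapshot above normalizes its token cross-entropy over masked positions only, ce_loss = (mask * ce_loss).sum() / mask.sum(), and computes masked token accuracy the same way. A minimal sketch of that reduction with optax, on hypothetical shapes:

import jax
import jax.numpy as jnp
import optax

B, T, N, V = 2, 4, 8, 16  # batch, time, patches per frame, vocab (hypothetical)
k1, k2, k3 = jax.random.split(jax.random.key(0), 3)
logits = jax.random.normal(k1, (B, T, N, V))
tokens = jax.random.randint(k2, (B, T, N), 0, V)
mask = jax.random.bernoulli(k3, 0.5, (B, T, N))  # 1 where a token was masked

# Per-position cross entropy, averaged over masked positions only
ce = optax.softmax_cross_entropy_with_integer_labels(logits, tokens)
masked_ce = (mask * ce).sum() / mask.sum()
masked_acc = (mask * (logits.argmax(-1) == tokens)).sum() / mask.sum()
print(masked_ce, masked_acc)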
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-f493ee6f-fdfb-4a84-bc4c-0358523e54001754136495510-2025_08_02-14.08.22.904/source.csv
ADDED
@@ -0,0 +1,6 @@
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
1,3,"genie.py",0,0,"from typing import Dict\nimport time\n\nimport optax\nimport jax\nimport jax.numpy as jnp\nimport flax.nnx as nnx\nimport orbax.checkpoint as ocp\n\nfrom models.dynamics import DynamicsMaskGIT, DynamicsCausal\nfrom models.lam import LatentActionModel\nfrom models.tokenizer import TokenizerVQVAE\n\n\nclass Genie(nnx.Module):\n """"""Genie model""""""\n\n def __init__(\n self,\n in_dim: int,\n tokenizer_dim: int,\n tokenizer_ffn_dim: int,\n latent_patch_dim: int,\n num_patch_latents: int,\n patch_size: int,\n tokenizer_num_blocks: int,\n tokenizer_num_heads: int,\n lam_dim: int,\n lam_ffn_dim: int,\n latent_action_dim: int,\n num_latent_actions: int,\n lam_patch_size: int,\n lam_num_blocks: int,\n lam_num_heads: int,\n lam_co_train: bool,\n dyna_type: str,\n dyna_dim: int,\n dyna_ffn_dim: int,\n dyna_num_blocks: int,\n dyna_num_heads: int,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n dropout: float = 0.0,\n mask_limit: float = 0.0,\n ):\n # --- Tokenizer ---\n self.in_dim = in_dim\n self.tokenizer_dim = tokenizer_dim\n self.tokenizer_ffn_dim = tokenizer_ffn_dim\n self.latent_patch_dim = latent_patch_dim\n self.num_patch_latents = num_patch_latents\n self.patch_size = patch_size\n self.tokenizer_num_blocks = tokenizer_num_blocks\n self.tokenizer_num_heads = tokenizer_num_heads\n # --- LAM ---\n self.lam_dim = lam_dim\n self.lam_ffn_dim = lam_ffn_dim\n self.latent_action_dim = latent_action_dim\n self.num_latent_actions = num_latent_actions\n self.lam_patch_size = lam_patch_size\n self.lam_num_blocks = lam_num_blocks\n self.lam_num_heads = lam_num_heads\n self.lam_co_train = lam_co_train\n # --- Dynamics ---\n self.dyna_type = dyna_type\n self.dyna_dim = dyna_dim\n self.dyna_ffn_dim = dyna_ffn_dim\n self.dyna_num_blocks = dyna_num_blocks\n self.dyna_num_heads = dyna_num_heads\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.dropout = dropout\n self.mask_limit = mask_limit\n\n self.tokenizer = TokenizerVQVAE(\n in_dim=self.in_dim,\n model_dim=self.tokenizer_dim,\n ffn_dim=self.tokenizer_ffn_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_patch_latents,\n patch_size=self.patch_size,\n num_blocks=self.tokenizer_num_blocks,\n num_heads=self.tokenizer_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n self.lam = LatentActionModel(\n in_dim=self.in_dim,\n model_dim=self.lam_dim,\n ffn_dim=self.lam_ffn_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_latent_actions,\n patch_size=self.lam_patch_size,\n num_blocks=self.lam_num_blocks,\n num_heads=self.lam_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n if self.dyna_type == ""maskgit"":\n self.dynamics = DynamicsMaskGIT(\n model_dim=self.dyna_dim,\n ffn_dim=self.dyna_ffn_dim,\n num_latents=self.num_patch_latents,\n latent_action_dim=self.latent_action_dim,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n mask_limit=self.mask_limit,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n elif self.dyna_type == ""causal"":\n self.dynamics = DynamicsCausal(\n model_dim=self.dyna_dim,\n ffn_dim=self.dyna_ffn_dim,\n 
num_latents=self.num_patch_latents,\n latent_action_dim=self.latent_action_dim,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n decode=decode,\n rngs=rngs,\n )\n else:\n raise ValueError(f""Invalid dynamics type: {self.dyna_type}"")\n\n def __call__(\n self, batch: Dict[str, jax.Array], training: bool = True\n ) -> Dict[str, jax.Array]:\n videos_BTHWC = batch[""videos""]\n tokenizer_outputs = self.tokenizer.vq_encode(videos_BTHWC, training=False)\n token_indices_BTN = tokenizer_outputs[""indices""]\n lam_outputs = self.lam.vq_encode(videos_BTHWC, training=False)\n z_q_BTm11L = lam_outputs[""z_q""]\n action_indices_E = lam_outputs[""indices""]\n latent_actions_BTm11L = jax.lax.cond(\n self.lam_co_train,\n lambda: z_q_BTm11L,\n lambda: jax.lax.stop_gradient(z_q_BTm11L),\n )\n outputs = dict(\n video_tokens=jax.lax.stop_gradient(token_indices_BTN),\n latent_actions=latent_actions_BTm11L,\n )\n outputs[""mask_rng""] = batch[""mask_rng""]\n dyna_logits_BTNV, dyna_mask = self.dynamics(outputs, training)\n outputs[""token_logits""] = dyna_logits_BTNV\n if dyna_mask is not None:\n outputs[""mask""] = dyna_mask\n mle_indices_BTN = jnp.argmax(outputs[""token_logits""], axis=-1)\n H, W = batch[""videos""].shape[2:4]\n outputs[""recon""] = self.tokenizer.decode(mle_indices_BTN, (H, W))\n outputs[""lam_indices""] = action_indices_E\n return outputs\n\n # FIXME (f.srambical): sampling should be moved to the dynamics classes\n def sample(\n self,\n batch: Dict[str, jax.Array],\n seq_len: int,\n steps: int = 25,\n temperature: float = 1,\n sample_argmax: bool = False,\n ) -> jax.Array:\n """"""\n Autoregressively samples up to `seq_len` future frames, following Figure 8 of the paper.\n\n - Input frames are tokenized once.\n - Future frames are generated autoregressively in token space.\n - All frames are detokenized in a single pass.\n\n Note:\n - For interactive or step-wise sampling, detokenization should occur after each action.\n - To maintain consistent tensor shapes across timesteps, all current and future frames are decoded at every step.\n - Temporal causal structure is preserved by\n a) reapplying the mask before each decoding step.\n b) a temporal causal mask is applied within each ST-transformer block.\n\n Dimension keys:\n B: batch size\n T: number of input (conditioning) frames\n N: number of patches per frame\n M: model dimension\n S: sequence length\n H: height\n W: width\n E: B * (S - 1)\n """"""\n # --- Encode videos and actions ---\n videos_BTHWC = batch[""videos""]\n latent_actions_E = batch[""latent_actions""]\n tokenizer_out = self.tokenizer.vq_encode(videos_BTHWC, training=False)\n token_idxs_BTN = tokenizer_out[""indices""]\n B, T, N = token_idxs_BTN.shape\n pad_shape = (B, seq_len - T, N)\n pad = jnp.zeros(pad_shape, dtype=token_idxs_BTN.dtype)\n token_idxs_BSN = jnp.concatenate([token_idxs_BTN, pad], axis=1)\n action_tokens_EL = self.lam.vq.get_codes(latent_actions_E)\n\n def maskgit_step_fn(\n carry: tuple[jax.Array, jax.Array, jax.Array, jax.Array], step: jax.Array\n ) -> tuple[tuple[jax.Array, jax.Array, jax.Array, jax.Array], None]:\n rng, token_idxs_BSN, mask_BSN, action_tokens_EL = carry\n S, N = token_idxs_BSN.shape[1:]\n L = action_tokens_EL.shape[-1]\n\n # --- Construct + encode video ---\n vid_embed_BSNM = self.dynamics.patch_embed(token_idxs_BSN)\n mask_token_111M = self.dynamics.mask_token.value\n mask_expanded_BSN1 = 
mask_BSN[..., None]\n vid_embed_BSNM = jnp.where(mask_expanded_BSN1, mask_token_111M, vid_embed_BSNM)\n\n # --- Predict transition ---\n action_tokens_BSm1L = jnp.reshape(action_tokens_EL, (B, S - 1, L))\n act_embed_BSm1M = self.dynamics.action_up(action_tokens_BSm1L)\n act_embed_BSM = jnp.pad(act_embed_BSm1M, ((0, 0), (1, 0), (0, 0)))\n act_embed_BS1M = jnp.reshape(act_embed_BSM, (B, S, 1, act_embed_BSM.shape[-1]))\n vid_embed_BSNM += act_embed_BS1M\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (steps * 2))\n step_temp = temperature * (1.0 - unmasked_ratio)\n final_logits_BSNV = self.dynamics.transformer(vid_embed_BSNM) / step_temp\n\n # --- Sample new tokens for final frame ---\n if sample_argmax:\n sampled_token_idxs_BSN = jnp.argmax(final_logits_BSNV, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs_BSN = jax.random.categorical(_rng, final_logits_BSNV)\n gather_fn = jax.vmap(jax.vmap(jax.vmap(lambda x, y: x[y])))\n final_token_probs_BSN = gather_fn(\n jax.nn.softmax(final_logits_BSNV), sampled_token_idxs_BSN\n )\n final_token_probs_BSN += ~mask_BSN\n # Update masked tokens only\n token_idxs_BSN = jnp.where(mask_BSN, sampled_token_idxs_BSN, token_idxs_BSN)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask_N = jnp.arange(final_token_probs_BSN.shape[-1]) > num_unmasked_tokens\n sorted_idxs_BSN = jnp.argsort(final_token_probs_BSN, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask_N))\n new_mask_BSN = mask_update_fn(mask_BSN, sorted_idxs_BSN)\n\n new_carry = (rng, token_idxs_BSN, new_mask_BSN, action_tokens_EL)\n return new_carry, None\n\n def generation_step_fn(\n carry: tuple[jax.Array, jax.Array], step_t: jax.Array\n ) -> tuple[tuple[jax.Array, jax.Array], None]:\n rng, current_token_idxs_BSN = carry\n rng, step_rng = jax.random.split(rng)\n\n # Mask current and future frames (i.e., t >= step_t)\n mask_S = jnp.arange(seq_len) >= step_t\n mask_BSN = jnp.broadcast_to(mask_S[None, :, None], (B, seq_len, N)).astype(\n bool\n )\n masked_token_idxs_BSN = current_token_idxs_BSN * ~mask_BSN\n\n # --- Initialize and run MaskGIT loop ---\n init_carry_maskgit = (\n step_rng,\n masked_token_idxs_BSN,\n mask_BSN,\n action_tokens_EL,\n )\n final_carry_maskgit, _ = jax.lax.scan(\n maskgit_step_fn, init_carry_maskgit, jnp.arange(steps)\n )\n updated_token_idxs_BSN = final_carry_maskgit[1]\n new_carry = (rng, updated_token_idxs_BSN)\n return new_carry, None\n\n # --- Run the autoregressive generation using jax.lax.scan ---\n initial_carry = (batch[""rng""], token_idxs_BSN)\n timesteps_to_scan = jnp.arange(T, seq_len)\n final_carry, _ = jax.lax.scan(\n generation_step_fn, initial_carry, timesteps_to_scan\n )\n final_token_idxs_BSN = final_carry[1]\n\n # --- Decode all tokens at once at the end ---\n H, W = batch[""videos""].shape[2:4]\n final_frames_BSHWC = self.tokenizer.decode(\n final_token_idxs_BSN,\n video_hw=(H, W),\n )\n return final_frames_BSHWC\n\n def sample_causal(\n self,\n batch: Dict[str, jax.Array],\n seq_len: int,\n temperature: float = 1,\n sample_argmax: bool = False,\n ) -> jax.Array:\n """"""\n Autoregressively samples up to `seq_len` future frames, following Figure 8 of the paper.\n\n - Input frames are tokenized once.\n - Future frames are generated autoregressively in token space.\n - All frames are detokenized in a single pass.\n\n Note:\n - For interactive or step-wise sampling, detokenization should occur after each action.\n - To maintain 
consistent tensor shapes across timesteps, all current and future frames are decoded at every step.\n - Temporal causal structure is preserved by\n a) reapplying the mask before each decoding step.\n b) a temporal causal mask is applied within each ST-transformer block.\n\n Dimension keys:\n B: batch size\n T: number of input (conditioning) frames\n N: number of patches per frame\n M: model dimension\n S: sequence length\n H: height\n W: width\n E: B * (S - 1)\n """"""\n # FIXME (f.srambical): reset spatial kv cache after each frame\n assert isinstance(self.dynamics, DynamicsCausal)\n # --- Encode videos and actions ---\n videos_BTHWC = batch[""videos""]\n latent_actions_E = batch[""latent_actions""]\n tokenizer_out = self.tokenizer.vq_encode(videos_BTHWC, training=False)\n token_idxs_BTN = tokenizer_out[""indices""]\n B, T, N = token_idxs_BTN.shape\n pad_shape = (B, seq_len - T, N)\n pad = jnp.zeros(pad_shape, dtype=token_idxs_BTN.dtype)\n token_idxs_BSN = jnp.concatenate([token_idxs_BTN, pad], axis=1)\n action_tokens_EL = self.lam.vq.get_codes(latent_actions_E)\n dynamics_causal: DynamicsCausal = self.dynamics\n\n for block in dynamics_causal.transformer.blocks:\n block.spatial_attention.init_cache((B * seq_len, (N + 1), self.dyna_dim), dtype=self.dtype)\n block.temporal_attention.init_cache((B * (N + 1), seq_len, self.dyna_dim), dtype=self.dtype)\n\n def causal_step_fn(\n carry: tuple[jax.Array, jax.Array, jax.Array, jax.Array], step_n: jax.Array\n ) -> tuple[tuple[jax.Array, jax.Array, jax.Array, jax.Array], None]:\n rng, token_idxs_BSN, action_tokens_EL, step_t = carry\n S, N = token_idxs_BSN.shape[1:]\n L = action_tokens_EL.shape[-1]\n\n # --- Construct + encode video ---\n vid_embed_BSNM = dynamics_causal.patch_embed(token_idxs_BSN)\n\n # --- Predict transition ---\n action_tokens_BSm1L = jnp.reshape(action_tokens_EL, (B, S - 1, L))\n act_embed_BSm1M = dynamics_causal.action_up(action_tokens_BSm1L)\n act_embed_BSM = jnp.pad(act_embed_BSm1M, ((0, 0), (1, 0), (0, 0)))\n act_embed_BS1M = jnp.reshape(act_embed_BSM, (B, S, 1, act_embed_BSM.shape[-1]))\n vid_embed_BSNp1M = jnp.concatenate([act_embed_BS1M, vid_embed_BSNM], axis=2)\n final_logits_BTNp1V = dynamics_causal.transformer(vid_embed_BSNp1M, (step_t, step_n)) / temperature\n final_logits_BV = final_logits_BTNp1V[:, step_t, step_n, :]\n\n # --- Sample new tokens for final frame ---\n if sample_argmax:\n sampled_token_idxs_B = jnp.argmax(final_logits_BV, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs_B = jax.random.categorical(_rng, final_logits_BV)\n # Update next tokens only\n token_idxs_BSN = token_idxs_BSN.at[:, step_t, step_n].set(sampled_token_idxs_B)\n step_t += 1\n\n new_carry = (rng, token_idxs_BSN, action_tokens_EL, step_t)\n return new_carry, None\n\n # --- Run the autoregressive generation using a for loop ---\n rng = batch[""rng""]\n current_token_idxs_BSN = token_idxs_BSN\n \n for step_t in range(T, seq_len):\n rng, step_rng = jax.random.split(rng)\n\n # --- Reset spatial KV caches before each frame ---\n for block in dynamics_causal.transformer.blocks:\n block.spatial_attention.init_cache((B * seq_len, (N + 1), self.dyna_dim), dtype=self.dtype)\n #breakpoint()\n\n # --- Initialize and run causal loop ---\n init_carry_causal = (\n step_rng,\n current_token_idxs_BSN,\n action_tokens_EL,\n jnp.array(step_t, dtype=jnp.int32),\n )\n\n # current_token_idxs_BSN.block_until_ready()\n # start = time.time()\n final_carry_causal, _ = jax.lax.scan(\n causal_step_fn, init_carry_causal, jnp.arange(N)\n )\n 
# final_carry_causal[1].block_until_ready()\n # elapsed = time.time() - start\n # print(f""Autoregressive generation time: {elapsed:.4f}s"")\n # breakpoint()\n current_token_idxs_BSN = final_carry_causal[1]\n \n final_token_idxs_BSN = current_token_idxs_BSN\n\n # --- Decode all tokens at once at the end ---\n H, W = batch[""videos""].shape[2:4]\n final_frames_BSHWC = self.tokenizer.decode(\n final_token_idxs_BSN,\n video_hw=(H, W),\n )\n return final_frames_BSHWC\n\n def vq_encode(self, batch: Dict[str, jax.Array], training: bool) -> jax.Array:\n # --- Preprocess videos ---\n video_BTHWC = batch[""videos""]\n lam_output = self.lam.vq_encode(video_BTHWC, training=training)\n lam_indices_E = lam_output[""indices""]\n return lam_indices_E\n\n# FIXME (f.srambical): add conversion script for old checkpoints\ndef restore_genie_components(\n optimizer: nnx.Optimizer,\n sharding: jax.sharding.NamedSharding,\n rng: jax.Array,\n args,\n) -> nnx.Optimizer:\n """"""Restore pre-trained Genie components""""""\n rngs = nnx.Rngs(rng)\n\n tx = optimizer.tx\n model = optimizer.model\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n step_format_fixed_length=6,\n )\n tokenizer_checkpoint_manager = ocp.CheckpointManager(\n directory=args.tokenizer_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.tokenizer_dim,\n ffn_dim=args.tokenizer_ffn_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n num_blocks=args.tokenizer_num_blocks,\n num_heads=args.tokenizer_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs,\n )\n dummy_tokenizer_optimizer = nnx.Optimizer(dummy_tokenizer, tx)\n dummy_tokenizer_optimizer_state = nnx.state(dummy_tokenizer_optimizer)\n abstract_sharded_tokenizer_optimizer_state = _create_abstract_sharded_pytree(\n dummy_tokenizer_optimizer_state, sharding\n )\n restored_tokenizer = tokenizer_checkpoint_manager.restore(\n step=tokenizer_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore( # type: ignore\n abstract_sharded_tokenizer_optimizer_state # type: ignore\n ),\n ),\n )[""model_state""]\n nnx.update(dummy_tokenizer_optimizer.model, restored_tokenizer.model)\n model.tokenizer = dummy_tokenizer_optimizer.model\n tokenizer_checkpoint_manager.close()\n\n if args.lam_checkpoint:\n lam_checkpoint_manager = ocp.CheckpointManager(\n directory=args.lam_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.lam_dim,\n ffn_dim=args.lam_ffn_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_latent_actions,\n patch_size=args.lam_patch_size,\n num_blocks=args.lam_num_blocks,\n num_heads=args.lam_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs,\n )\n dummy_lam_optimizer = nnx.Optimizer(dummy_lam, tx)\n dummy_lam_optimizer_state = nnx.state(dummy_lam_optimizer)\n abstract_sharded_lam_optimizer_state = _create_abstract_sharded_pytree(\n 
dummy_lam_optimizer_state, sharding\n )\n restored_lam_optimizer = lam_checkpoint_manager.restore(\n step=lam_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore( # type: ignore\n abstract_sharded_lam_optimizer_state # type: ignore\n ),\n ),\n )[""model_state""]\n nnx.update(dummy_lam_optimizer.model, restored_lam_optimizer.model)\n model.lam = dummy_lam_optimizer.model\n # Remove the LAM decoder to save memory and avoid unnecessary computation.\n del model.lam.decoder\n lam_checkpoint_manager.close()\n \n # Reinitialize the optimizer states\n optimizer = nnx.Optimizer(model, tx)\n return optimizer\n\n\ndef _create_abstract_sharded_pytree(\n pytree_template: nnx.GraphState, sharding_spec: jax.sharding.NamedSharding\n) -> jax.Array:\n """"""Replaces arrays in a pytree with ShapeDtypeStructs having the given sharding.""""""\n\n def map_fn(leaf_template):\n if hasattr(leaf_template, ""shape"") and hasattr(leaf_template, ""dtype""):\n return jax.ShapeDtypeStruct(\n leaf_template.shape, leaf_template.dtype, sharding=sharding_spec\n )\n return leaf_template\n\n return jax.tree_util.tree_map(map_fn, pytree_template)\n",python,tab
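The maskgit_step_fn recorded above unmasks tokens on a cosine schedule. A minimal standalone sketch of that schedule, assuming hypothetical values steps=25 and N=64 (refinement steps and patches per frame; not the recorded configuration):

import jax.numpy as jnp

steps, N = 25, 64  # hypothetical: refinement steps, patches per frame
for step in range(steps):
    # Same formula as maskgit_step_fn: cosine term decaying from ~1 to 0.
    unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (steps * 2))
    num_unmasked = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)
    # num_unmasked grows monotonically and reaches N at the final step,
    # so every token is committed by the end of the refinement loop.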
|
3 |
+
2,145,"tasks",0,0,"",Log,tab
|
4 |
+
3,147,"genie.py",0,0,"",python,tab
|
5 |
+
4,320,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"2:08:22 PM [info] Activating crowd-code\n2:08:22 PM [info] Recording started\n2:08:22 PM [info] Initializing git provider using file system watchers...\n",Log,tab
|
6 |
+
5,505,"extension-output-pdoom-org.crowd-code-#1-crowd-code",150,0,"2:08:23 PM [info] Git repository found\n2:08:23 PM [info] Git provider initialized successfully\n2:08:23 PM [info] Initial git state: [object Object]\n",Log,content
|
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-f4d5bf7f-03bc-460b-912a-a4218449896f1754112326764-2025_08_02-07.25.34.900/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-172d798b-bee8-455a-b904-9dd3fe6387d51754411154298-2025_08_05-18.25.56.221/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-222fe98e-29ac-4b20-9a65-fe2e31f8eb701751128122769-2025_06_28-09.28.47.536/source.csv
ADDED
@@ -0,0 +1,29 @@
1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
2 |
+
1,2,"train_tokenizer.py",0,0,"from dataclasses import dataclass, field\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n checkpoint: str = """"\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 3e-4\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_tokenizer""\n tags: list[str] = field(default_factory=lambda: [""tokenizer""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\[email protected]\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n 
print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(\n entity=args.entity,\n project=args.project,\n name=args.name,\n tags=args.tags,\n group=""debug"",\n config=args\n )\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer_<timestamp>_<step>\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout = jax.random.split(rng, 3)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(videos=videos, rng=_rng, dropout_rng=_rng_dropout)\n start_time = time.time()\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n elapsed_time = (time.time() - start_time) * 1000\n print(f""Step {step}, loss: {loss}, step time: {elapsed_time}ms"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n ""step_time_ms"": elapsed_time,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = 
inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""tokenizer_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab
|
3 |
+
2,62,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
4 |
+
3,97,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"9:28:47 AM [info] Activating crowd-code\n9:28:47 AM [info] Recording started\n9:28:47 AM [info] Initializing git provider using file system watchers...\n9:28:47 AM [info] Git repository found\n9:28:47 AM [info] Git provider initialized successfully\n",Log,content
|
5 |
+
4,195,"extension-output-pdoom-org.crowd-code-#1-crowd-code",245,0,"9:28:47 AM [info] Initial git state: [object Object]\n",Log,content
|
6 |
+
5,109366,"train_tokenizer.py",0,0,"",python,tab
|
7 |
+
6,120114,"train_tokenizer.py",0,0,"Switched from branch 'dataloader-reproducibility-test' to 'main'",python,git_branch_checkout
|
8 |
+
7,135115,"train_tokenizer.py",6657,0,"",python,selection_mouse
|
9 |
+
8,135131,"train_tokenizer.py",6656,0,"",python,selection_command
|
10 |
+
9,584544,"utils/dataloader.py",0,0,"import functools\nimport jax\n\nimport tensorflow as tf\n\n# reserve GPU memory for JAX only if tensorflow is built with GPU support\ntf.config.experimental.set_visible_devices([], ""GPU"")\n\n\n# --- TensorFlow function for processing: slicing, normalization ---\ndef _tf_process_episode(episode_tensor, seq_len, image_h, image_w, image_c):\n """"""\n Processes a raw episode tensor in TensorFlow.\n Takes a full episode, extracts a random sequence, and normalizes it.\n Args:\n episode_tensor: A TensorFlow tensor representing a full video episode.\n Expected shape: (dynamic_length, image_h, image_w, image_c)\n Expected dtype: e.g., tf.uint8 (raw pixel values)\n seq_len: The desired length of the sub-sequence to extract.\n image_h: The height of each frame.\n image_w: The width of each frame.\n image_c: The number of channels in each frame.\n Returns:\n A TensorFlow tensor representing the processed video sequence.\n Shape: (seq_len, image_h, image_w, image_c)\n Dtype: tf.float32 (normalized pixel values)\n """"""\n current_episode_len = tf.shape(episode_tensor)[0]\n\n max_start_idx = current_episode_len - seq_len\n\n start_idx = tf.random.uniform(\n shape=(), minval=0, maxval=max_start_idx + 1, dtype=tf.int32\n )\n\n seq = episode_tensor[start_idx : start_idx + seq_len]\n\n seq = tf.cast(seq, tf.float32) / 255.0\n\n # Ensure the final shape is statically known for batching.\n # tf.reshape is robust, but tf.ensure_shape or set_shape can also be used if confident.\n processed_sequence = tf.reshape(seq, [seq_len, image_h, image_w, image_c])\n\n return processed_sequence\n\n\ndef _parse_tfrecord_fn(example_proto, image_h, image_w, image_c):\n feature_description = {\n ""height"": tf.io.FixedLenFeature([], tf.int64),\n ""width"": tf.io.FixedLenFeature([], tf.int64),\n ""channels"": tf.io.FixedLenFeature([], tf.int64),\n ""sequence_length"": tf.io.FixedLenFeature([], tf.int64),\n ""raw_video"": tf.io.FixedLenFeature([], tf.string),\n }\n example = tf.io.parse_single_example(example_proto, feature_description)\n\n video_shape = (example[""sequence_length""], image_h, image_w, image_c)\n\n episode_tensor = tf.io.decode_raw(example[""raw_video""], out_type=tf.uint8)\n episode_tensor = tf.reshape(episode_tensor, video_shape)\n\n episode_tensor = tf.ensure_shape(episode_tensor, [None, image_h, image_w, image_c])\n return episode_tensor\n\n\ndef get_dataloader(\n tfrecord_paths: list[str], # List of TFRecord file paths\n seq_len: int,\n global_batch_size: int,\n image_h: int,\n image_w: int,\n image_c: int,\n shuffle_buffer_size: int = 1000,\n num_parallel_calls: int = tf.data.AUTOTUNE,\n seed: int = 42,\n):\n """"""\n Creates a tf.data.Dataset pipeline from TFRecord files.\n """"""\n if not tfrecord_paths:\n raise ValueError(""tfrecord_paths list cannot be empty."")\n\n process_id = jax.process_index()\n num_processes = jax.process_count()\n\n assert (\n global_batch_size % num_processes == 0\n ), ""Global batch size {global_batch_size} \\n must be divisible by the number of JAX processes {num_processes} for proper sharding.""\n per_process_batch_size = global_batch_size // num_processes\n\n dataset = tf.data.TFRecordDataset(\n tfrecord_paths, num_parallel_reads=tf.data.AUTOTUNE\n )\n\n dataset = dataset.shard(num_shards=num_processes, index=process_id)\n\n # (f.srambical) NOTE: For TFRecords, it's often good to have a large shuffle buffer.\n if shuffle_buffer_size > 0:\n dataset = dataset.shuffle(\n buffer_size=shuffle_buffer_size, seed=seed, 
reshuffle_each_iteration=True\n )\n parse_fn = functools.partial(\n _parse_tfrecord_fn, image_h=image_h, image_w=image_w, image_c=image_c\n )\n dataset = dataset.map(parse_fn, num_parallel_calls=num_parallel_calls)\n\n tf_process_fn = functools.partial(\n _tf_process_episode,\n seq_len=seq_len,\n image_h=image_h,\n image_w=image_w,\n image_c=image_c,\n )\n dataset = dataset.map(tf_process_fn, num_parallel_calls=num_parallel_calls)\n\n dataset = dataset.repeat(None)\n dataset = dataset.batch(per_process_batch_size, drop_remainder=True)\n dataset = dataset.prefetch(tf.data.AUTOTUNE)\n\n return dataset.as_numpy_iterator()\n",python,tab
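For orientation, a hypothetical single-process call to the get_dataloader defined above (the glob pattern and sizes are placeholders, not the recorded configuration):

import glob

files = sorted(glob.glob("data_tfrecords/coinrun/*.tfrecord"))  # placeholder path
loader = get_dataloader(files, seq_len=16, global_batch_size=48,
                        image_h=90, image_w=160, image_c=3)
batch = next(loader)  # numpy array, shape (per_process_batch, 16, 90, 160, 3)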
|
11 |
+
10,587078,"utils/dataloader.py",4313,0,"",python,selection_command
|
12 |
+
11,651295,"utils/dataloader.py",0,0,"",python,selection_command
|
13 |
+
12,652011,"utils/dataloader.py",17,0,"",python,selection_command
|
14 |
+
13,652136,"utils/dataloader.py",28,0,"",python,selection_command
|
15 |
+
14,652214,"utils/dataloader.py",29,0,"",python,selection_command
|
16 |
+
15,652563,"utils/dataloader.py",36,0,"",python,selection_command
|
17 |
+
16,655020,"utils/dataloader.py",4380,0,"",python,selection_command
|
18 |
+
17,655495,"utils/dataloader.py",4341,0,"",python,selection_command
|
19 |
+
18,655578,"utils/dataloader.py",4345,0,"",python,selection_command
|
20 |
+
19,655774,"utils/dataloader.py",4352,0,"",python,selection_command
|
21 |
+
20,655989,"utils/dataloader.py",4359,0,"",python,selection_command
|
22 |
+
21,656093,"utils/dataloader.py",4340,0,"",python,selection_command
|
23 |
+
22,656226,"utils/dataloader.py",4345,0,"",python,selection_command
|
24 |
+
23,656410,"utils/dataloader.py",4340,0,"",python,selection_command
|
25 |
+
24,656615,"utils/dataloader.py",4295,0,"",python,selection_command
|
26 |
+
25,656753,"utils/dataloader.py",4303,0,"",python,selection_command
|
27 |
+
26,656931,"utils/dataloader.py",4305,0,"",python,selection_command
|
28 |
+
27,657095,"utils/dataloader.py",4312,0,"",python,selection_command
|
29 |
+
28,657232,"utils/dataloader.py",4313,0,"",python,selection_command
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-29e2cbae-7056-4585-b457-f48bd451c3fd1750644341589-2025_06_22-19.05.43.270/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-2bb200ce-4bc8-4bc3-9354-29e24db5d38e1752063967983-2025_07_09-14.26.42.463/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-48fbb415-6db9-4d35-b548-561e828791bf1751383187013-2025_07_01-17.19.57.60/source.csv
ADDED
@@ -0,0 +1,4 @@
1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
2 |
+
1,2,"utils/dataloader.py",0,0,"import functools\nimport jax\n\nimport tensorflow as tf\n\n# reserve GPU memory for JAX only if tensorflow is built with GPU support\ntf.config.experimental.set_visible_devices([], ""GPU"")\n\n\n# --- TensorFlow function for processing: slicing, normalization ---\ndef _tf_process_episode(episode_tensor, seq_len, image_h, image_w, image_c):\n """"""\n Processes a raw episode tensor in TensorFlow.\n Takes a full episode, extracts a random sequence, and normalizes it.\n Args:\n episode_tensor: A TensorFlow tensor representing a full video episode.\n Expected shape: (dynamic_length, image_h, image_w, image_c)\n Expected dtype: e.g., tf.uint8 (raw pixel values)\n seq_len: The desired length of the sub-sequence to extract.\n image_h: The height of each frame.\n image_w: The width of each frame.\n image_c: The number of channels in each frame.\n Returns:\n A TensorFlow tensor representing the processed video sequence.\n Shape: (seq_len, image_h, image_w, image_c)\n Dtype: tf.float32 (normalized pixel values)\n """"""\n current_episode_len = tf.shape(episode_tensor)[0]\n\n max_start_idx = current_episode_len - seq_len\n\n start_idx = tf.random.uniform(\n shape=(), minval=0, maxval=max_start_idx + 1, dtype=tf.int32\n )\n\n seq = episode_tensor[start_idx : start_idx + seq_len]\n\n seq = tf.cast(seq, tf.float32) / 255.0\n\n # Ensure the final shape is statically known for batching.\n # tf.reshape is robust, but tf.ensure_shape or set_shape can also be used if confident.\n processed_sequence = tf.reshape(seq, [seq_len, image_h, image_w, image_c])\n\n return processed_sequence\n\n\ndef _parse_tfrecord_fn(example_proto, image_h, image_w, image_c):\n feature_description = {\n ""height"": tf.io.FixedLenFeature([], tf.int64),\n ""width"": tf.io.FixedLenFeature([], tf.int64),\n ""channels"": tf.io.FixedLenFeature([], tf.int64),\n ""sequence_length"": tf.io.FixedLenFeature([], tf.int64),\n ""raw_video"": tf.io.FixedLenFeature([], tf.string),\n }\n example = tf.io.parse_single_example(example_proto, feature_description)\n\n video_shape = (example[""sequence_length""], image_h, image_w, image_c)\n\n episode_tensor = tf.io.decode_raw(example[""raw_video""], out_type=tf.uint8)\n episode_tensor = tf.reshape(episode_tensor, video_shape)\n\n episode_tensor = tf.ensure_shape(episode_tensor, [None, image_h, image_w, image_c])\n return episode_tensor\n\n\ndef get_dataloader(\n tfrecord_paths: list[str], # List of TFRecord file paths\n seq_len: int,\n global_batch_size: int,\n image_h: int,\n image_w: int,\n image_c: int,\n shuffle_buffer_size: int = 1000,\n num_parallel_calls: int = tf.data.AUTOTUNE,\n seed: int = 42,\n):\n """"""\n Creates a tf.data.Dataset pipeline from TFRecord files.\n """"""\n if not tfrecord_paths:\n raise ValueError(""tfrecord_paths list cannot be empty."")\n\n process_id = jax.process_index()\n num_processes = jax.process_count()\n\n assert (\n global_batch_size % num_processes == 0\n ), ""Global batch size {global_batch_size} \\n must be divisible by the number of JAX processes {num_processes} for proper sharding.""\n per_process_batch_size = global_batch_size // num_processes\n\n dataset = tf.data.TFRecordDataset(\n tfrecord_paths, num_parallel_reads=tf.data.AUTOTUNE\n )\n\n dataset = dataset.shard(num_shards=num_processes, index=process_id)\n\n # (f.srambical) NOTE: For TFRecords, it's often good to have a large shuffle buffer.\n if shuffle_buffer_size > 0:\n dataset = dataset.shuffle(\n buffer_size=shuffle_buffer_size, seed=seed, 
reshuffle_each_iteration=True\n )\n parse_fn = functools.partial(\n _parse_tfrecord_fn, image_h=image_h, image_w=image_w, image_c=image_c\n )\n dataset = dataset.map(parse_fn, num_parallel_calls=num_parallel_calls)\n\n tf_process_fn = functools.partial(\n _tf_process_episode,\n seq_len=seq_len,\n image_h=image_h,\n image_w=image_w,\n image_c=image_c,\n )\n dataset = dataset.map(tf_process_fn, num_parallel_calls=num_parallel_calls)\n\n dataset = dataset.repeat(None)\n dataset = dataset.batch(per_process_batch_size, drop_remainder=True)\n dataset = dataset.prefetch(tf.data.AUTOTUNE)\n\n return dataset.as_numpy_iterator()\n",python,tab
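The random-window logic in _tf_process_episode above can be exercised in isolation; a small sketch with a dummy episode (all sizes illustrative):

import tensorflow as tf

episode = tf.zeros((100, 90, 160, 3), dtype=tf.uint8)  # dummy 100-frame episode
seq = _tf_process_episode(episode, seq_len=16, image_h=90, image_w=160, image_c=3)
# seq: shape (16, 90, 160, 3), dtype tf.float32, pixel values scaled to [0, 1]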
|
3 |
+
2,121,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
4 |
+
3,886,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"5:19:57 PM [info] Activating crowd-code\n5:19:57 PM [info] Recording started\n5:19:57 PM [info] Initializing git provider using file system watchers...\n5:19:57 PM [info] Git repository found\n5:19:57 PM [info] Git provider initialized successfully\n5:19:57 PM [info] Initial git state: [object Object]\n",Log,content
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-53035602-cd5a-4dad-bc79-2cb4d8d4f7681751162692203-2025_06_28-19.04.53.413/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-55b74e48-50e3-4bf3-8e02-f03e464c22ac1750632538084-2025_06_22-15.48.59.681/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-67e49b73-4378-4d9b-aa07-eb22704d83ae1750992411736-2025_06_26-19.46.53.239/source.csv
ADDED
@@ -0,0 +1,241 @@
1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
2 |
+
1,2,"train_tokenizer.py",0,0,"from dataclasses import dataclass\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n checkpoint: str = """"\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 3e-4\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params, inputs, training=True, rngs={""dropout"": inputs[""rng""]}\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\[email protected]\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be 
divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(entity=args.entity, project=args.project, group=""debug"", config=args)\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer_<timestamp>_<step>\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng = jax.random.split(rng)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(videos=videos, rng=_rng)\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log :\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log({""loss"": loss, ""step"": step, **metrics})\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication. 
\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""tokenizer_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab
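The loop above turns per-process batches into one globally sharded array before calling train_step. A minimal sketch of that pattern, assuming a single-host setup and dummy data (the axis name "data" matches the script; shapes are placeholders):

import numpy as np
import jax
from jax.sharding import Mesh, PartitionSpec, NamedSharding
from jax.experimental.mesh_utils import create_device_mesh

mesh = Mesh(create_device_mesh((jax.device_count(),)), axis_names=("data",))
videos_sharding = NamedSharding(mesh, PartitionSpec("data"))  # shard batch axis
local = np.zeros((jax.local_device_count() * 2, 8), np.float32)  # dummy local batch
global_arr = jax.make_array_from_process_local_data(videos_sharding, local)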
|
3 |
+
2,27,"tasks",0,0,"",Log,tab
|
4 |
+
3,28,"train_tokenizer.py",0,0,"",python,tab
|
5 |
+
4,59,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
6 |
+
5,70,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"7:46:53 PM [info] Activating crowd-code\n7:46:53 PM [info] Recording started\n7:46:53 PM [info] Initializing git provider using file system watchers...\n7:46:53 PM [info] Git repository found\n7:46:53 PM [info] Git provider initialized successfully\n",Log,content
|
7 |
+
6,82,"extension-output-pdoom-org.crowd-code-#1-crowd-code",245,0,"7:46:53 PM [info] Initial git state: [object Object]\n",Log,content
|
8 |
+
7,6647,"train_tokenizer.py",0,0,"",python,tab
|
9 |
+
8,10063,"train_tokenizer.py",0,0,"Switched from branch 'fix-multiprocess-image-logging' to 'main'",python,git_branch_checkout
|
10 |
+
9,20323,"utils/dataloader.py",0,0,"from cgi import test\nimport functools\nimport jax\n\nimport tensorflow as tf\n\n\n# --- TensorFlow function for processing: slicing, normalization ---\ndef _tf_process_episode(episode_tensor, seq_len, image_h, image_w, image_c):\n """"""\n Processes a raw episode tensor in TensorFlow.\n Takes a full episode, extracts a random sequence, and normalizes it.\n Args:\n episode_tensor: A TensorFlow tensor representing a full video episode.\n Expected shape: (dynamic_length, image_h, image_w, image_c)\n Expected dtype: e.g., tf.uint8 (raw pixel values)\n seq_len: The desired length of the sub-sequence to extract.\n image_h: The height of each frame.\n image_w: The width of each frame.\n image_c: The number of channels in each frame.\n Returns:\n A TensorFlow tensor representing the processed video sequence.\n Shape: (seq_len, image_h, image_w, image_c)\n Dtype: tf.float32 (normalized pixel values)\n """"""\n current_episode_len = tf.shape(episode_tensor)[0]\n\n max_start_idx = current_episode_len - seq_len\n\n start_idx = tf.random.uniform(\n shape=(), minval=0, maxval=max_start_idx + 1, dtype=tf.int32\n )\n\n seq = episode_tensor[start_idx : start_idx + seq_len]\n\n seq = tf.cast(seq, tf.float32) / 255.0\n\n # Ensure the final shape is statically known for batching.\n # tf.reshape is robust, but tf.ensure_shape or set_shape can also be used if confident.\n processed_sequence = tf.reshape(seq, [seq_len, image_h, image_w, image_c])\n\n return processed_sequence\n\n\ndef _parse_tfrecord_fn(example_proto, image_h, image_w, image_c):\n feature_description = {\n ""height"": tf.io.FixedLenFeature([], tf.int64),\n ""width"": tf.io.FixedLenFeature([], tf.int64),\n ""channels"": tf.io.FixedLenFeature([], tf.int64),\n ""sequence_length"": tf.io.FixedLenFeature([], tf.int64),\n ""raw_video"": tf.io.FixedLenFeature([], tf.string),\n }\n example = tf.io.parse_single_example(example_proto, feature_description)\n\n video_shape = (example[""sequence_length""], image_h, image_w, image_c)\n\n episode_tensor = tf.io.decode_raw(example[""raw_video""], out_type=tf.uint8)\n episode_tensor = tf.reshape(episode_tensor, video_shape)\n\n episode_tensor = tf.ensure_shape(episode_tensor, [None, image_h, image_w, image_c])\n return episode_tensor\n\n\ndef get_dataloader(\n tfrecord_paths: list[str], # List of TFRecord file paths\n seq_len: int,\n global_batch_size: int,\n image_h: int,\n image_w: int,\n image_c: int,\n shuffle_buffer_size: int = 1000,\n num_parallel_calls: int = tf.data.AUTOTUNE,\n cache_processed_data: bool = True,\n seed: int = 42,\n):\n """"""\n Creates a tf.data.Dataset pipeline from TFRecord files.\n """"""\n if not tfrecord_paths:\n raise ValueError(""tfrecord_paths list cannot be empty."")\n\n process_id = jax.process_index()\n num_processes = jax.process_count()\n\n assert global_batch_size % num_processes == 0, ""Global batch size {global_batch_size} \\n must be divisible by the number of JAX processes {num_processes} for proper sharding.""\n per_process_batch_size = global_batch_size // num_processes\n\n dataset = tf.data.TFRecordDataset(\n tfrecord_paths, num_parallel_reads=tf.data.AUTOTUNE\n )\n \n dataset = dataset.shard(num_shards=num_processes, index=process_id)\n\n # (f.srambical) NOTE: For TFRecords, it's often good to have a large shuffle buffer.\n if shuffle_buffer_size > 0:\n dataset = dataset.shuffle(\n buffer_size=shuffle_buffer_size, seed=seed, reshuffle_each_iteration=True\n )\n parse_fn = functools.partial(\n _parse_tfrecord_fn, image_h=image_h, 
image_w=image_w, image_c=image_c\n )\n dataset = dataset.map(parse_fn, num_parallel_calls=num_parallel_calls)\n\n dataset = dataset.cache() if cache_processed_data else dataset\n\n tf_process_fn = functools.partial(\n _tf_process_episode,\n seq_len=seq_len,\n image_h=image_h,\n image_w=image_w,\n image_c=image_c,\n )\n dataset = dataset.map(tf_process_fn, num_parallel_calls=num_parallel_calls)\n\n dataset = dataset.repeat(None)\n dataset = dataset.batch(per_process_batch_size, drop_remainder=True)\n dataset = dataset.prefetch(tf.data.AUTOTUNE)\n\n return dataset.as_numpy_iterator()\n",python,tab
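This earlier revision inserts dataset.cache() between the deterministic parse map and the random-slice map; the ordering matters, since anything cached after a random op would be frozen on replay. A tiny illustration of the principle (toy pipeline, not the recorded one):

import tensorflow as tf

ds = tf.data.Dataset.range(4)
ds = ds.map(lambda x: x * 10)  # deterministic decode step: safe to cache
ds = ds.cache()                # cached once, replayed verbatim on later epochs
ds = ds.map(lambda x: x + tf.random.uniform([], 0, 10, dtype=tf.int64))  # stays random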
|
11 |
+
10,22376,"train_tokenizer.py",0,0,"from dataclasses import dataclass\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n checkpoint: str = """"\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 3e-4\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params, inputs, training=True, rngs={""dropout"": inputs[""rng""]}\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\[email protected]\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must 
be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(entity=args.entity, project=args.project, group=""debug"", config=args)\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer_<timestamp>_<step>\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng = jax.random.split(rng)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(videos=videos, rng=_rng)\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log :\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log({""loss"": loss, ""step"": step, **metrics})\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication. 
\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""tokenizer_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab
|
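The recorded train_tokenizer.py composes the usual VQ-VAE objective from three terms: pixel reconstruction, a codebook-alignment term, and a beta-weighted commitment term, with jax.lax.stop_gradient controlling which side of the quantizer receives gradients. A minimal sketch of that composition, assuming z are encoder outputs and emb the matched codebook vectors as in the recording:

import jax
import jax.numpy as jnp

def vq_objective(videos, recon, z, emb, vq_beta=0.25):
    # Reconstruction term on raw pixels.
    mse = jnp.square(videos - recon).mean()
    # Pulls encoder outputs z toward the (frozen) codebook vectors.
    q_loss = jnp.square(jax.lax.stop_gradient(emb) - z).mean()
    # Pulls codebook vectors toward the (frozen) encoder outputs.
    commitment_loss = jnp.square(emb - jax.lax.stop_gradient(z)).mean()
    return mse + q_loss + vq_beta * commitment_loss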
12 |
+
11,24879,"utils/dataloader.py",0,0,"",python,tab
|
13 |
+
12,26164,"utils/dataloader.py",900,0,"",python,selection_command
|
14 |
+
13,26410,"utils/dataloader.py",1591,0,"",python,selection_command
|
15 |
+
14,26446,"utils/dataloader.py",2494,0,"",python,selection_command
|
16 |
+
15,26473,"utils/dataloader.py",3222,0,"",python,selection_command
|
17 |
+
16,26506,"utils/dataloader.py",4041,0,"",python,selection_command
|
18 |
+
17,26540,"utils/dataloader.py",4367,0,"",python,selection_command
|
19 |
+
18,27279,"utils/dataloader.py",0,0,"",python,selection_command
|
20 |
+
19,27700,"utils/dataloader.py",0,21,"",python,content
|
21 |
+
20,28587,"train_tokenizer.py",0,0,"",python,tab
|
22 |
+
21,30438,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"7:46:53 PM [info] Activating crowd-code\n7:46:53 PM [info] Recording started\n7:46:53 PM [info] Initializing git provider using file system watchers...\n7:46:53 PM [info] Git repository found\n7:46:53 PM [info] Git provider initialized successfully\n7:46:53 PM [info] Initial git state: [object Object]\n7:47:03 PM [info] Branch checkout detected: fix-multiprocess-image-logging -> main\n7:47:03 PM [info] Recording git checkout: Switched from branch 'fix-multiprocess-image-logging' to 'main'\n7:47:03 PM [info] Resetting file cache due to branch checkout\n",Log,tab
|
23 |
+
22,31032,"train_tokenizer.py",0,0,"",python,tab
|
24 |
+
23,34437,"train_tokenizer.py",0,7825,"from dataclasses import dataclass, field\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n checkpoint: str = """"\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 3e-4\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_tokenizer""\n tags: list[str] = field(default_factory=lambda: [""tokenizer""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\[email protected]\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n 
print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(\n entity=args.entity,\n project=args.project,\n name=args.name,\n tags=args.tags,\n group=""debug"",\n config=args\n )\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer_<timestamp>_<step>\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout = jax.random.split(rng, 3)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(videos=videos, rng=_rng, dropout_rng=_rng_dropout)\n start_time = time.time()\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n elapsed_time = (time.time() - start_time) * 1000\n print(f""Step {step}, loss: {loss}, step time: {elapsed_time}ms"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n ""step_time_ms"": elapsed_time,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = 
inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n",python,content
|
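The revision recorded above (sequence 23) adds two things to the training loop: a three-way PRNG split so dropout draws from its own stream, and wall-clock timing around the jitted step. A sketch of both under those assumptions; note that because JAX dispatches asynchronously, the elapsed time only reflects device compute if the result is blocked on:

import time
import jax

rng = jax.random.PRNGKey(0)
for step in range(3):
    # Fresh, independent keys each step; rng itself is carried forward.
    rng, _rng, _rng_dropout = jax.random.split(rng, 3)
    start = time.time()
    out = jax.random.normal(_rng, (1024, 1024)) @ jax.random.normal(_rng_dropout, (1024, 1024))
    jax.block_until_ready(out)  # without this, time.time() mostly measures dispatch
    print(f"step {step}: {(time.time() - start) * 1000:.1f}ms")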
25 |
+
24,59345,"train_tokenizer.py",7447,0,"",python,selection_mouse
|
26 |
+
25,60087,"train_tokenizer.py",0,0,"Switched from branch 'main' to 'proper-seeding-dataloader'",python,git_branch_checkout
|
27 |
+
26,82111,"train_tokenizer.py",532,0,"",python,selection_command
|
28 |
+
27,82341,"train_tokenizer.py",6425,0,"",python,selection_command
|
29 |
+
28,83082,"train_tokenizer.py",6458,0,"",python,selection_command
|
30 |
+
29,83330,"train_tokenizer.py",6517,0,"",python,selection_command
|
31 |
+
30,83364,"train_tokenizer.py",6582,0,"",python,selection_command
|
32 |
+
31,83395,"train_tokenizer.py",6606,0,"",python,selection_command
|
33 |
+
32,83424,"train_tokenizer.py",6628,0,"",python,selection_command
|
34 |
+
33,83582,"train_tokenizer.py",6653,0,"",python,selection_command
|
35 |
+
34,83834,"train_tokenizer.py",6657,0,"\n ",python,content
|
36 |
+
35,84049,"train_tokenizer.py",6666,0,"s",python,content
|
37 |
+
36,84051,"train_tokenizer.py",6667,0,"",python,selection_keyboard
|
38 |
+
37,84131,"train_tokenizer.py",6667,0,"e",python,content
|
39 |
+
38,84132,"train_tokenizer.py",6668,0,"",python,selection_keyboard
|
40 |
+
39,84292,"train_tokenizer.py",6668,0,"e",python,content
|
41 |
+
40,84294,"train_tokenizer.py",6669,0,"",python,selection_keyboard
|
42 |
+
41,84404,"train_tokenizer.py",6669,0,"d",python,content
|
43 |
+
42,84405,"train_tokenizer.py",6670,0,"",python,selection_keyboard
|
44 |
+
43,84562,"train_tokenizer.py",6670,0,"=",python,content
|
45 |
+
44,84565,"train_tokenizer.py",6671,0,"",python,selection_keyboard
|
46 |
+
45,84827,"train_tokenizer.py",6671,0,"s",python,content
|
47 |
+
46,84830,"train_tokenizer.py",6672,0,"",python,selection_keyboard
|
48 |
+
47,84894,"train_tokenizer.py",6672,0,"e",python,content
|
49 |
+
48,84896,"train_tokenizer.py",6673,0,"",python,selection_keyboard
|
50 |
+
49,85077,"train_tokenizer.py",6673,0,"e",python,content
|
51 |
+
50,85079,"train_tokenizer.py",6674,0,"",python,selection_keyboard
|
52 |
+
51,85148,"train_tokenizer.py",6674,0,"d",python,content
|
53 |
+
52,85151,"train_tokenizer.py",6675,0,"",python,selection_keyboard
|
54 |
+
53,85506,"train_tokenizer.py",6674,1,"",python,content
|
55 |
+
54,85675,"train_tokenizer.py",6673,1,"",python,content
|
56 |
+
55,85813,"train_tokenizer.py",6672,1,"",python,content
|
57 |
+
56,85954,"train_tokenizer.py",6671,1,"",python,content
|
58 |
+
57,86027,"train_tokenizer.py",6671,0,"a",python,content
|
59 |
+
58,86028,"train_tokenizer.py",6672,0,"",python,selection_keyboard
|
60 |
+
59,86496,"train_tokenizer.py",6672,0,"r",python,content
|
61 |
+
60,86501,"train_tokenizer.py",6673,0,"",python,selection_keyboard
|
62 |
+
61,86659,"train_tokenizer.py",6673,0,"g",python,content
|
63 |
+
62,86662,"train_tokenizer.py",6674,0,"",python,selection_keyboard
|
64 |
+
63,86712,"train_tokenizer.py",6674,0,"s",python,content
|
65 |
+
64,86718,"train_tokenizer.py",6675,0,"",python,selection_keyboard
|
66 |
+
65,86790,"train_tokenizer.py",6675,0,".",python,content
|
67 |
+
66,86795,"train_tokenizer.py",6676,0,"",python,selection_keyboard
|
68 |
+
67,86928,"train_tokenizer.py",6676,0,"s",python,content
|
69 |
+
68,86931,"train_tokenizer.py",6677,0,"",python,selection_keyboard
|
70 |
+
69,87007,"train_tokenizer.py",6677,0,"e",python,content
|
71 |
+
70,87013,"train_tokenizer.py",6678,0,"",python,selection_keyboard
|
72 |
+
71,87196,"train_tokenizer.py",6678,0,"e",python,content
|
73 |
+
72,87202,"train_tokenizer.py",6679,0,"",python,selection_keyboard
|
74 |
+
73,87295,"train_tokenizer.py",6679,0,"d",python,content
|
75 |
+
74,87300,"train_tokenizer.py",6680,0,"",python,selection_keyboard
|
76 |
+
75,87429,"train_tokenizer.py",6680,0,",",python,content
|
77 |
+
76,87432,"train_tokenizer.py",6681,0,"",python,selection_keyboard
|
78 |
+
77,87560,"train_tokenizer.py",6680,0,"",python,selection_command
|
79 |
+
78,88768,"train_tokenizer.py",6680,1,"",python,content
|
80 |
+
79,88775,"train_tokenizer.py",6679,0,"",python,selection_command
|
81 |
+
80,89370,"train_tokenizer.py",6680,0,"",python,selection_command
|
82 |
+
81,89528,"train_tokenizer.py",6680,0,",",python,content
|
83 |
+
82,89530,"train_tokenizer.py",6681,0,"",python,selection_keyboard
|
84 |
+
83,89641,"train_tokenizer.py",6680,0,"",python,selection_command
|
85 |
+
84,96963,"utils/dataloader.py",0,0,"import functools\nimport jax\n\nimport tensorflow as tf\n\n# reserve GPU memory for JAX only if tensorflow is built with GPU support\ntf.config.experimental.set_visible_devices([], ""GPU"")\n\n\n# --- TensorFlow function for processing: slicing, normalization ---\ndef _tf_process_episode(episode_tensor, seq_len, image_h, image_w, image_c):\n """"""\n Processes a raw episode tensor in TensorFlow.\n Takes a full episode, extracts a random sequence, and normalizes it.\n Args:\n episode_tensor: A TensorFlow tensor representing a full video episode.\n Expected shape: (dynamic_length, image_h, image_w, image_c)\n Expected dtype: e.g., tf.uint8 (raw pixel values)\n seq_len: The desired length of the sub-sequence to extract.\n image_h: The height of each frame.\n image_w: The width of each frame.\n image_c: The number of channels in each frame.\n Returns:\n A TensorFlow tensor representing the processed video sequence.\n Shape: (seq_len, image_h, image_w, image_c)\n Dtype: tf.float32 (normalized pixel values)\n """"""\n current_episode_len = tf.shape(episode_tensor)[0]\n\n max_start_idx = current_episode_len - seq_len\n\n start_idx = tf.random.uniform(\n shape=(), minval=0, maxval=max_start_idx + 1, dtype=tf.int32\n )\n\n seq = episode_tensor[start_idx : start_idx + seq_len]\n\n seq = tf.cast(seq, tf.float32) / 255.0\n\n # Ensure the final shape is statically known for batching.\n # tf.reshape is robust, but tf.ensure_shape or set_shape can also be used if confident.\n processed_sequence = tf.reshape(seq, [seq_len, image_h, image_w, image_c])\n\n return processed_sequence\n\n\ndef _parse_tfrecord_fn(example_proto, image_h, image_w, image_c):\n feature_description = {\n ""height"": tf.io.FixedLenFeature([], tf.int64),\n ""width"": tf.io.FixedLenFeature([], tf.int64),\n ""channels"": tf.io.FixedLenFeature([], tf.int64),\n ""sequence_length"": tf.io.FixedLenFeature([], tf.int64),\n ""raw_video"": tf.io.FixedLenFeature([], tf.string),\n }\n example = tf.io.parse_single_example(example_proto, feature_description)\n\n video_shape = (example[""sequence_length""], image_h, image_w, image_c)\n\n episode_tensor = tf.io.decode_raw(example[""raw_video""], out_type=tf.uint8)\n episode_tensor = tf.reshape(episode_tensor, video_shape)\n\n episode_tensor = tf.ensure_shape(episode_tensor, [None, image_h, image_w, image_c])\n return episode_tensor\n\n\ndef get_dataloader(\n tfrecord_paths: list[str], # List of TFRecord file paths\n seq_len: int,\n global_batch_size: int,\n image_h: int,\n image_w: int,\n image_c: int,\n shuffle_buffer_size: int = 1000,\n num_parallel_calls: int = tf.data.AUTOTUNE,\n seed: int = 42,\n):\n """"""\n Creates a tf.data.Dataset pipeline from TFRecord files.\n """"""\n if not tfrecord_paths:\n raise ValueError(""tfrecord_paths list cannot be empty."")\n\n process_id = jax.process_index()\n num_processes = jax.process_count()\n\n assert (\n global_batch_size % num_processes == 0\n ), ""Global batch size {global_batch_size} \\n must be divisible by the number of JAX processes {num_processes} for proper sharding.""\n per_process_batch_size = global_batch_size // num_processes\n\n dataset = tf.data.TFRecordDataset(\n tfrecord_paths, num_parallel_reads=tf.data.AUTOTUNE\n )\n\n dataset = dataset.shard(num_shards=num_processes, index=process_id)\n\n # (f.srambical) NOTE: For TFRecords, it's often good to have a large shuffle buffer.\n if shuffle_buffer_size > 0:\n dataset = dataset.shuffle(\n buffer_size=shuffle_buffer_size, seed=seed, 
reshuffle_each_iteration=True\n )\n parse_fn = functools.partial(\n _parse_tfrecord_fn, image_h=image_h, image_w=image_w, image_c=image_c\n )\n dataset = dataset.map(parse_fn, num_parallel_calls=num_parallel_calls)\n\n tf_process_fn = functools.partial(\n _tf_process_episode,\n seq_len=seq_len,\n image_h=image_h,\n image_w=image_w,\n image_c=image_c,\n )\n dataset = dataset.map(tf_process_fn, num_parallel_calls=num_parallel_calls)\n\n dataset = dataset.repeat(None)\n dataset = dataset.batch(per_process_batch_size, drop_remainder=True)\n dataset = dataset.prefetch(tf.data.AUTOTUNE)\n\n return dataset.as_numpy_iterator()\n",python,tab
|
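The dataloader above shards records by JAX process before shuffling, so each process draws batches from a disjoint, per-process-shuffled stream. Two details are worth noting: the assert message lacks an f prefix, so {global_batch_size} and {num_processes} are printed literally rather than interpolated, and the random crop in _tf_process_episode draws from tf.random.uniform, whose op-level seed is independent of the seed passed to shuffle. A minimal deterministic-shuffle sketch under those assumptions:

import tensorflow as tf

def sharded_pipeline(paths, num_processes, process_id, per_process_batch, seed=42):
    ds = tf.data.TFRecordDataset(paths)  # serial reads keep record order stable
    ds = ds.shard(num_shards=num_processes, index=process_id)
    # The same seed gives the same order on every run, while
    # reshuffle_each_iteration=True still varies the order across epochs.
    ds = ds.shuffle(1000, seed=seed, reshuffle_each_iteration=True)
    ds = ds.repeat().batch(per_process_batch, drop_remainder=True)
    return ds.prefetch(tf.data.AUTOTUNE).as_numpy_iterator()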
86 |
+
85,101346,"utils/dataloader.py",0,0,"",python,selection_command
|
87 |
+
86,103097,"utils/dataloader.py",258,0,"",python,selection_command
|
88 |
+
87,103665,"utils/dataloader.py",329,0,"",python,selection_command
|
89 |
+
88,103932,"utils/dataloader.py",328,0,"",python,selection_command
|
90 |
+
89,104129,"utils/dataloader.py",327,0,"",python,selection_command
|
91 |
+
90,104547,"utils/dataloader.py",327,0," ",python,content
|
92 |
+
91,104551,"utils/dataloader.py",328,0,"",python,selection_keyboard
|
93 |
+
92,104823,"utils/dataloader.py",327,1,"",python,content
|
94 |
+
93,105013,"utils/dataloader.py",327,0,",",python,content
|
95 |
+
94,105015,"utils/dataloader.py",328,0,"",python,selection_keyboard
|
96 |
+
95,105094,"utils/dataloader.py",328,0," ",python,content
|
97 |
+
96,105095,"utils/dataloader.py",329,0,"",python,selection_keyboard
|
98 |
+
97,105158,"utils/dataloader.py",329,0,"s",python,content
|
99 |
+
98,105161,"utils/dataloader.py",330,0,"",python,selection_keyboard
|
100 |
+
99,105221,"utils/dataloader.py",330,0,"e",python,content
|
101 |
+
100,105226,"utils/dataloader.py",331,0,"",python,selection_keyboard
|
102 |
+
101,105395,"utils/dataloader.py",331,0,"e",python,content
|
103 |
+
102,105400,"utils/dataloader.py",332,0,"",python,selection_keyboard
|
104 |
+
103,105472,"utils/dataloader.py",332,0,"d",python,content
|
105 |
+
104,105477,"utils/dataloader.py",333,0,"",python,selection_keyboard
|
106 |
+
105,107401,"utils/dataloader.py",332,0,"",python,selection_command
|
107 |
+
106,107747,"utils/dataloader.py",342,0,"",python,selection_command
|
108 |
+
107,107995,"utils/dataloader.py",392,0,"",python,selection_command
|
109 |
+
108,108025,"utils/dataloader.py",465,0,"",python,selection_command
|
110 |
+
109,108057,"utils/dataloader.py",475,0,"",python,selection_command
|
111 |
+
110,108090,"utils/dataloader.py",554,0,"",python,selection_command
|
112 |
+
111,108122,"utils/dataloader.py",635,0,"",python,selection_command
|
113 |
+
112,108156,"utils/dataloader.py",712,0,"",python,selection_command
|
114 |
+
113,108190,"utils/dataloader.py",780,0,"",python,selection_command
|
115 |
+
114,108226,"utils/dataloader.py",823,0,"",python,selection_command
|
116 |
+
115,108532,"utils/dataloader.py",865,0,"",python,selection_command
|
117 |
+
116,108694,"utils/dataloader.py",920,0,"",python,selection_command
|
118 |
+
117,108845,"utils/dataloader.py",933,0,"",python,selection_command
|
119 |
+
118,108993,"utils/dataloader.py",1004,0,"",python,selection_command
|
120 |
+
119,109161,"utils/dataloader.py",933,0,"",python,selection_command
|
121 |
+
120,109317,"utils/dataloader.py",920,0,"",python,selection_command
|
122 |
+
121,109885,"utils/dataloader.py",921,0,"\n ",python,content
|
123 |
+
122,110783,"utils/dataloader.py",930,0,"seed: The seed for the random number generator.",python,content
|
124 |
+
123,111064,"utils/dataloader.py",976,0,"",python,selection_command
|
125 |
+
124,117529,"utils/dataloader.py",989,0,"",python,selection_command
|
126 |
+
125,117778,"utils/dataloader.py",1045,0,"",python,selection_command
|
127 |
+
126,117811,"utils/dataloader.py",1112,0,"",python,selection_command
|
128 |
+
127,117840,"utils/dataloader.py",1164,0,"",python,selection_command
|
129 |
+
128,117873,"utils/dataloader.py",1172,0,"",python,selection_command
|
130 |
+
129,117907,"utils/dataloader.py",1226,0,"",python,selection_command
|
131 |
+
130,117943,"utils/dataloader.py",1228,0,"",python,selection_command
|
132 |
+
131,117974,"utils/dataloader.py",1277,0,"",python,selection_command
|
133 |
+
132,118006,"utils/dataloader.py",1279,0,"",python,selection_command
|
134 |
+
133,118041,"utils/dataloader.py",1313,0,"",python,selection_command
|
135 |
+
134,118212,"utils/dataloader.py",1369,0,"",python,selection_command
|
136 |
+
135,118426,"utils/dataloader.py",1388,0,"",python,selection_command
|
137 |
+
136,118612,"utils/dataloader.py",1390,0,"",python,selection_command
|
138 |
+
137,118761,"utils/dataloader.py",1388,0,"",python,selection_command
|
139 |
+
138,118943,"utils/dataloader.py",1369,0,"",python,selection_command
|
140 |
+
139,119216,"utils/dataloader.py",1383,0,"",python,selection_command
|
141 |
+
140,119396,"utils/dataloader.py",1383,0,",",python,content
|
142 |
+
141,119402,"utils/dataloader.py",1384,0,"",python,selection_keyboard
|
143 |
+
142,119527,"utils/dataloader.py",1384,0," ",python,content
|
144 |
+
143,119531,"utils/dataloader.py",1385,0,"",python,selection_keyboard
|
145 |
+
144,119718,"utils/dataloader.py",1385,0,"seed=seed",python,content
|
146 |
+
145,119947,"utils/dataloader.py",1393,0,"",python,selection_command
|
147 |
+
146,139247,"utils/dataloader.py",1711,0,"",python,selection_command
|
148 |
+
147,139494,"utils/dataloader.py",1810,0,"",python,selection_command
|
149 |
+
148,139831,"utils/dataloader.py",2286,0,"",python,selection_command
|
150 |
+
149,140416,"utils/dataloader.py",2515,0,"",python,selection_command
|
151 |
+
150,140813,"utils/dataloader.py",2703,0,"",python,selection_command
|
152 |
+
151,141584,"utils/dataloader.py",3869,0,"",python,selection_command
|
153 |
+
152,148097,"utils/dataloader.py",3923,0,"",python,selection_command
|
154 |
+
153,148260,"utils/dataloader.py",3953,0,"",python,selection_command
|
155 |
+
154,151496,"utils/dataloader.py",3923,0,"",python,selection_command
|
156 |
+
155,174924,"utils/dataloader.py",3953,0,"",python,selection_command
|
157 |
+
156,175063,"utils/dataloader.py",4000,0,"",python,selection_command
|
158 |
+
157,175208,"utils/dataloader.py",4029,0,"",python,selection_command
|
159 |
+
158,175345,"utils/dataloader.py",4067,0,"",python,selection_command
|
160 |
+
159,175627,"utils/dataloader.py",4029,0,"",python,selection_command
|
161 |
+
160,175878,"utils/dataloader.py",4000,0,"",python,selection_command
|
162 |
+
161,175911,"utils/dataloader.py",3953,0,"",python,selection_command
|
163 |
+
162,176019,"utils/dataloader.py",3923,0,"",python,selection_command
|
164 |
+
163,176192,"utils/dataloader.py",3869,0,"",python,selection_command
|
165 |
+
164,182197,"utils/dataloader.py",3923,0,"",python,selection_command
|
166 |
+
165,182443,"utils/dataloader.py",3953,0,"",python,selection_command
|
167 |
+
166,182474,"utils/dataloader.py",4000,0,"",python,selection_command
|
168 |
+
167,182506,"utils/dataloader.py",4029,0,"",python,selection_command
|
169 |
+
168,182542,"utils/dataloader.py",4067,0,"",python,selection_command
|
170 |
+
169,182575,"utils/dataloader.py",4092,0,"",python,selection_command
|
171 |
+
170,182609,"utils/dataloader.py",4117,0,"",python,selection_command
|
172 |
+
171,182642,"utils/dataloader.py",4142,0,"",python,selection_command
|
173 |
+
172,182675,"utils/dataloader.py",4167,0,"",python,selection_command
|
174 |
+
173,182708,"utils/dataloader.py",4173,0,"",python,selection_command
|
175 |
+
174,182742,"utils/dataloader.py",4203,0,"",python,selection_command
|
176 |
+
175,182905,"utils/dataloader.py",4255,0,"",python,selection_command
|
177 |
+
176,183278,"utils/dataloader.py",4203,0,"",python,selection_command
|
178 |
+
177,184182,"utils/dataloader.py",4173,0,"",python,selection_command
|
179 |
+
178,184437,"utils/dataloader.py",4167,0,"",python,selection_command
|
180 |
+
179,184464,"utils/dataloader.py",4142,0,"",python,selection_command
|
181 |
+
180,184630,"utils/dataloader.py",4167,0,"",python,selection_command
|
182 |
+
181,184793,"utils/dataloader.py",4173,0,"",python,selection_command
|
183 |
+
182,185078,"utils/dataloader.py",4167,0,"",python,selection_command
|
184 |
+
183,185298,"utils/dataloader.py",4168,0,"",python,selection_command
|
185 |
+
184,185462,"utils/dataloader.py",4168,0,"\n ",python,content
|
186 |
+
185,186233,"utils/dataloader.py",4177,0,"seed=seed,",python,content
|
187 |
+
186,186506,"utils/dataloader.py",4186,0,"",python,selection_command
|
188 |
+
187,198474,"utils/dataloader.py",4161,0,"",python,selection_command
|
189 |
+
188,198726,"utils/dataloader.py",4136,0,"",python,selection_command
|
190 |
+
189,198760,"utils/dataloader.py",4111,0,"",python,selection_command
|
191 |
+
190,198869,"utils/dataloader.py",4086,0,"",python,selection_command
|
192 |
+
191,199047,"utils/dataloader.py",4057,0,"",python,selection_command
|
193 |
+
192,200749,"utils/dataloader.py",4057,0,"",python,selection_command
|
194 |
+
193,202978,"utils/dataloader.py",276,0,"",python,selection_command
|
195 |
+
194,203226,"utils/dataloader.py",277,0,"",python,selection_command
|
196 |
+
195,203260,"utils/dataloader.py",291,0,"",python,selection_command
|
197 |
+
196,203290,"utils/dataloader.py",293,0,"",python,selection_command
|
198 |
+
197,203322,"utils/dataloader.py",300,0,"",python,selection_command
|
199 |
+
198,203357,"utils/dataloader.py",302,0,"",python,selection_command
|
200 |
+
199,203391,"utils/dataloader.py",309,0,"",python,selection_command
|
201 |
+
200,203424,"utils/dataloader.py",311,0,"",python,selection_command
|
202 |
+
201,203706,"utils/dataloader.py",318,0,"",python,selection_command
|
203 |
+
202,203900,"utils/dataloader.py",320,0,"",python,selection_command
|
204 |
+
203,204081,"utils/dataloader.py",327,0,"",python,selection_command
|
205 |
+
204,204709,"utils/dataloader.py",329,0,"",python,selection_command
|
206 |
+
205,205294,"utils/dataloader.py",930,0,"",python,selection_command
|
207 |
+
206,205878,"utils/dataloader.py",940,0,"",python,selection_command
|
208 |
+
207,206129,"utils/dataloader.py",1385,0,"",python,selection_command
|
209 |
+
208,206376,"utils/dataloader.py",1390,0,"",python,selection_command
|
210 |
+
209,206845,"utils/dataloader.py",2842,0,"",python,selection_command
|
211 |
+
210,235602,"utils/dataloader.py",0,0,"",python,tab
|
212 |
+
211,235615,"utils/dataloader.py",253,0,"",python,selection_command
|
213 |
+
212,236452,"train_tokenizer.py",0,0,"from dataclasses import dataclass, field\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n checkpoint: str = """"\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 3e-4\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_tokenizer""\n tags: list[str] = field(default_factory=lambda: [""tokenizer""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\[email protected]\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n 
print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(\n entity=args.entity,\n project=args.project,\n name=args.name,\n tags=args.tags,\n group=""debug"",\n config=args\n )\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer_<timestamp>_<step>\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n seed=args.seed,\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout = jax.random.split(rng, 3)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(videos=videos, rng=_rng, dropout_rng=_rng_dropout)\n start_time = time.time()\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n elapsed_time = (time.time() - start_time) * 1000\n print(f""Step {step}, loss: {loss}, step time: {elapsed_time}ms"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n ""step_time_ms"": elapsed_time,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq 
= inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""tokenizer_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab
|
214 |
+
213,236464,"train_tokenizer.py",6658,0,"",python,selection_command
|
215 |
+
214,239808,"utils/dataloader.py",0,0,"",python,tab
|
216 |
+
215,239819,"utils/dataloader.py",253,0,"",python,selection_command
|
217 |
+
216,288398,"utils/dataloader.py",0,0,"",python,tab
|
218 |
+
217,290847,"utils/dataloader.py",253,3935,"def _tf_process_episode(episode_tensor, seq_len, image_h, image_w, image_c):\n """"""\n Processes a raw episode tensor in TensorFlow.\n Takes a full episode, extracts a random sequence, and normalizes it.\n Args:\n episode_tensor: A TensorFlow tensor representing a full video episode.\n Expected shape: (dynamic_length, image_h, image_w, image_c)\n Expected dtype: e.g., tf.uint8 (raw pixel values)\n seq_len: The desired length of the sub-sequence to extract.\n image_h: The height of each frame.\n image_w: The width of each frame.\n image_c: The number of channels in each frame.\n Returns:\n A TensorFlow tensor representing the processed video sequence.\n Shape: (seq_len, image_h, image_w, image_c)\n Dtype: tf.float32 (normalized pixel values)\n """"""\n current_episode_len = tf.shape(episode_tensor)[0]\n\n max_start_idx = current_episode_len - seq_len\n\n start_idx = tf.random.uniform(\n shape=(), minval=0, maxval=max_start_idx + 1, dtype=tf.int32\n )\n\n seq = episode_tensor[start_idx : start_idx + seq_len]\n\n seq = tf.cast(seq, tf.float32) / 255.0\n\n # Ensure the final shape is statically known for batching.\n # tf.reshape is robust, but tf.ensure_shape or set_shape can also be used if confident.\n processed_sequence = tf.reshape(seq, [seq_len, image_h, image_w, image_c])\n\n return processed_sequence\n\n\ndef _parse_tfrecord_fn(example_proto, image_h, image_w, image_c):\n feature_description = {\n ""height"": tf.io.FixedLenFeature([], tf.int64),\n ""width"": tf.io.FixedLenFeature([], tf.int64),\n ""channels"": tf.io.FixedLenFeature([], tf.int64),\n ""sequence_length"": tf.io.FixedLenFeature([], tf.int64),\n ""raw_video"": tf.io.FixedLenFeature([], tf.string),\n }\n example = tf.io.parse_single_example(example_proto, feature_description)\n\n video_shape = (example[""sequence_length""], image_h, image_w, image_c)\n\n episode_tensor = tf.io.decode_raw(example[""raw_video""], out_type=tf.uint8)\n episode_tensor = tf.reshape(episode_tensor, video_shape)\n\n episode_tensor = tf.ensure_shape(episode_tensor, [None, image_h, image_w, image_c])\n return episode_tensor\n\n\ndef get_dataloader(\n tfrecord_paths: list[str], # List of TFRecord file paths\n seq_len: int,\n global_batch_size: int,\n image_h: int,\n image_w: int,\n image_c: int,\n shuffle_buffer_size: int = 1000,\n num_parallel_calls: int = tf.data.AUTOTUNE,\n seed: int = 42,\n):\n """"""\n Creates a tf.data.Dataset pipeline from TFRecord files.\n """"""\n if not tfrecord_paths:\n raise ValueError(""tfrecord_paths list cannot be empty."")\n\n process_id = jax.process_index()\n num_processes = jax.process_count()\n\n assert (\n global_batch_size % num_processes == 0\n ), ""Global batch size {global_batch_size} \\n must be divisible by the number of JAX processes {num_processes} for proper sharding.""\n per_process_batch_size = global_batch_size // num_processes\n\n dataset = tf.data.TFRecordDataset(\n tfrecord_paths, num_parallel_reads=tf.data.AUTOTUNE\n )\n\n dataset = dataset.shard(num_shards=num_processes, index=process_id)\n\n # (f.srambical) NOTE: For TFRecords, it's often good to have a large shuffle buffer.\n if shuffle_buffer_size > 0:\n dataset = dataset.shuffle(\n buffer_size=shuffle_buffer_size, seed=seed, reshuffle_each_iteration=True\n )\n parse_fn = functools.partial(\n _parse_tfrecord_fn, image_h=image_h, image_w=image_w, image_c=image_c\n )\n dataset = dataset.map(parse_fn, num_parallel_calls=num_parallel_calls)\n\n tf_process_fn = functools.partial(\n _tf_process_episode,\n 
seq_len=seq_len,\n image_h=image_h,\n image_w=image_w,\n image_c=image_c,\n",python,content
|
219 |
+
218,295225,"utils/dataloader.py",0,0,"Switched from branch 'proper-seeding-dataloader' to 'main'",python,git_branch_checkout
|
220 |
+
219,303254,"utils/dataloader.py",2716,0,"",python,selection_mouse
|
221 |
+
220,303260,"utils/dataloader.py",2715,0,"",python,selection_command
|
222 |
+
221,305241,"utils/dataloader.py",0,0,"Switched from branch 'main' to 'dataloader-reproducibility-test'",python,git_branch_checkout
|
223 |
+
222,380349,"tests/data/generate_dummy_data.py",0,0,"",python,tab
|
224 |
+
223,389063,"tests/data/generate_dummy_data.py",0,0,"import tyro\nimport tensorflow as tf\nimport numpy as np\nfrom pathlib import Path\nfrom dataclasses import dataclass\n\n@dataclass\nclass Args:\n data_dir: str = ""data_tfrecords/dummy""\n num_episodes: int = 5\n episode_length: int = 16\n\n\n\ndef _bytes_feature(value):\n """"""Returns a bytes_list from a string / byte.""""""\n if isinstance(value, type(tf.constant(0))):\n value = value.numpy() # BytesList won't unpack a string from an EagerTensor.\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef _int64_feature(value):\n """"""Returns an int64_list from a bool / enum / int / uint.""""""\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef create_tfrecord_example(episode_numpy_array):\n """"""Creates a TFRecord example from a numpy array video.""""""\n feature = {\n ""height"": _int64_feature(episode_numpy_array.shape[1]),\n ""width"": _int64_feature(episode_numpy_array.shape[2]),\n ""channels"": _int64_feature(episode_numpy_array.shape[3]),\n ""sequence_length"": _int64_feature(episode_numpy_array.shape[0]),\n ""raw_video"": _bytes_feature(episode_numpy_array.tobytes()),\n }\n return tf.train.Example(features=tf.train.Features(feature=feature))\n\n\ndef generate_dummy_tfrecord(\n output_path, num_episodes=5, episode_length=16, height=90, width=160, channels=3\n):\n """"""Generates a dummy TFRecord file with synthetic video data.""""""\n print(f""Generating dummy TFRecord file at {output_path}"")\n with tf.io.TFRecordWriter(str(output_path)) as writer:\n for i in range(num_episodes):\n np.random.seed(i) # Seed per episode for some variation, but deterministic\n dummy_video = np.random.randint(\n 0, 256, size=(episode_length, height, width, channels), dtype=np.uint8\n )\n tf_example = create_tfrecord_example(dummy_video)\n writer.write(tf_example.SerializeToString())\n print(""Dummy TFRecord generation complete."")\n\n\nif __name__ == ""__main__"":\n args = tyro.cli(Args)\n temp_dir = Path(args.data_dir)\n temp_dir.mkdir(parents=True, exist_ok=True)\n dummy_file = temp_dir / ""dummy_test_shard.tfrecord""\n generate_dummy_tfrecord(dummy_file, num_episodes=args.num_episodes, episode_length=args.episode_length)\n print(f""Generated dummy file: {dummy_file}"")",python,content
|
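generate_dummy_data.py writes a single shard whose features mirror exactly what _parse_tfrecord_fn in utils/dataloader.py expects (height, width, channels, sequence_length, raw_video). A small round-trip check, assuming the script's default paths and sizes:

import tensorflow as tf

path = "data_tfrecords/dummy/dummy_test_shard.tfrecord"  # default output location
for rec in tf.data.TFRecordDataset(path).take(1):
    ex = tf.train.Example()
    ex.ParseFromString(rec.numpy())
    f = ex.features.feature
    print(f["sequence_length"].int64_list.value[0],   # 16
          f["height"].int64_list.value[0],            # 90
          len(f["raw_video"].bytes_list.value[0]))    # 16 * 90 * 160 * 3 bytes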
225 |
+
224,389406,"tests/data/generate_dummy_data.py",2337,0,"",python,selection_command
|
226 |
+
225,390350,"tests/data/generate_dummy_data.py",0,0,"",python,selection_command
|
227 |
+
226,400299,"tests/test_dataloader.py",0,0,"",python,tab
|
228 |
+
227,406857,"tests/test_dataloader.py",0,0,"import unittest\nimport numpy as np\nimport tensorflow as tf\nimport tempfile\nfrom pathlib import Path\n\nfrom utils.dataloader import get_dataloader\nfrom tests.data.generate_dummy_tfrecord import generate_dummy_tfrecord\n\n\nclass DataloaderReproducibilityTest(unittest.TestCase):\n\n def setUp(self):\n super().setUp()\n self._temp_dir_manager = tempfile.TemporaryDirectory()\n self.test_data_dir = Path(self._temp_dir_manager.name)\n self.addCleanup(self._temp_dir_manager.cleanup)\n self.dummy_tfrecord_path = self.test_data_dir / ""dummy_test_shard.tfrecord""\n\n self.num_episodes = 5\n self.episode_length = 16\n self.image_height = 64\n self.image_width = 64\n self.image_channels = 3\n generate_dummy_tfrecord(\n self.dummy_tfrecord_path,\n num_episodes=self.num_episodes,\n episode_length=self.episode_length,\n height=self.image_height,\n width=self.image_width,\n channels=self.image_channels,\n )\n self.tfrecord_files = [str(self.dummy_tfrecord_path)]\n\n self.fixed_seed = 42\n\n def test_dataloader_yields_reproducible_batches(self):\n seq_len = 8\n batch_size = 2\n\n dataloader1 = get_dataloader(\n self.tfrecord_files,\n seq_len,\n batch_size,\n self.image_height,\n self.image_width,\n self.image_channels,\n seed=self.fixed_seed,\n )\n batches1 = [next(dataloader1) for _ in range(3)]\n\n dataloader2 = get_dataloader(\n self.tfrecord_files,\n seq_len,\n batch_size,\n self.image_height,\n self.image_width,\n self.image_channels,\n seed=self.fixed_seed,\n )\n batches2 = [next(dataloader2) for _ in range(3)]\n\n for i, (b1, b2) in enumerate(zip(batches1, batches2)):\n np.testing.assert_array_equal(b1, b2, err_msg=f""Batch {i} is not reproducible"") # type: ignore\n\n\nif __name__ == ""__main__"":\n unittest.main()",python,content
|
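The test builds two iterators from the same seed and asserts their first three batches match element-wise; under a single JAX process the shard() call inside get_dataloader is a no-op, so this exercises only shuffle and crop determinism. A standalone equivalent of its core assertion, with the shard path and the generator's default 90x160x3 frames assumed:

import numpy as np
from utils.dataloader import get_dataloader

files = ["data_tfrecords/dummy/dummy_test_shard.tfrecord"]  # assumed shard path

def batches(seed, n=3):
    it = get_dataloader(files, 8, 2, 90, 160, 3, seed=seed)
    return [next(it) for _ in range(n)]

# Identical seeds should yield identical batch streams.
for i, (b1, b2) in enumerate(zip(batches(42), batches(42))):
    np.testing.assert_array_equal(b1, b2, err_msg=f"Batch {i} is not reproducible")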
229 |
+
228,407175,"tests/test_dataloader.py",2070,0,"",python,selection_command
|
230 |
+
229,407617,"tests/test_dataloader.py",0,0,"",python,selection_command
|
231 |
+
230,410557,"tests/test_dataloader.py",16,0,"",python,selection_command
|
232 |
+
231,410809,"tests/test_dataloader.py",35,0,"",python,selection_command
|
233 |
+
232,410841,"tests/test_dataloader.py",59,0,"",python,selection_command
|
234 |
+
233,410873,"tests/test_dataloader.py",75,0,"",python,selection_command
|
235 |
+
234,410907,"tests/test_dataloader.py",100,0,"",python,selection_command
|
236 |
+
235,410940,"tests/test_dataloader.py",101,0,"",python,selection_command
|
237 |
+
236,411050,"tests/test_dataloader.py",145,0,"",python,selection_command
|
238 |
+
237,411208,"tests/test_dataloader.py",216,0,"",python,selection_command
|
239 |
+
238,411358,"tests/test_dataloader.py",217,0,"",python,selection_command
|
240 |
+
239,413643,"tests/data/generate_dummy_data.py",0,0,"",python,tab
|
241 |
+
240,418693,"tests/test_dataloader.py",0,0,"",python,tab
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-7f860396-c5c8-4f1f-8ce7-04e005748e611754402256906-2025_08_05-15.57.44.850/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-84bc9952-c4b0-4456-bdc2-984faf53684f1751163593750-2025_06_28-19.19.55.196/source.csv
ADDED
@@ -0,0 +1,9 @@
1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
2 |
+
1,3,"tasks",0,0,"",Log,tab
|
3 |
+
2,18,"utils/dataloader.py",0,0,"import functools\nimport jax\n\nimport tensorflow as tf\n\n# reserve GPU memory for JAX only if tensorflow is built with GPU support\ntf.config.experimental.set_visible_devices([], ""GPU"")\n\n\n# --- TensorFlow function for processing: slicing, normalization ---\ndef _tf_process_episode(episode_tensor, seq_len, image_h, image_w, image_c):\n """"""\n Processes a raw episode tensor in TensorFlow.\n Takes a full episode, extracts a random sequence, and normalizes it.\n Args:\n episode_tensor: A TensorFlow tensor representing a full video episode.\n Expected shape: (dynamic_length, image_h, image_w, image_c)\n Expected dtype: e.g., tf.uint8 (raw pixel values)\n seq_len: The desired length of the sub-sequence to extract.\n image_h: The height of each frame.\n image_w: The width of each frame.\n image_c: The number of channels in each frame.\n Returns:\n A TensorFlow tensor representing the processed video sequence.\n Shape: (seq_len, image_h, image_w, image_c)\n Dtype: tf.float32 (normalized pixel values)\n """"""\n current_episode_len = tf.shape(episode_tensor)[0]\n\n max_start_idx = current_episode_len - seq_len\n\n start_idx = tf.random.uniform(\n shape=(), minval=0, maxval=max_start_idx + 1, dtype=tf.int32\n )\n\n seq = episode_tensor[start_idx : start_idx + seq_len]\n\n seq = tf.cast(seq, tf.float32) / 255.0\n\n # Ensure the final shape is statically known for batching.\n # tf.reshape is robust, but tf.ensure_shape or set_shape can also be used if confident.\n processed_sequence = tf.reshape(seq, [seq_len, image_h, image_w, image_c])\n\n return processed_sequence\n\n\ndef _parse_tfrecord_fn(example_proto, image_h, image_w, image_c):\n feature_description = {\n ""height"": tf.io.FixedLenFeature([], tf.int64),\n ""width"": tf.io.FixedLenFeature([], tf.int64),\n ""channels"": tf.io.FixedLenFeature([], tf.int64),\n ""sequence_length"": tf.io.FixedLenFeature([], tf.int64),\n ""raw_video"": tf.io.FixedLenFeature([], tf.string),\n }\n example = tf.io.parse_single_example(example_proto, feature_description)\n\n video_shape = (example[""sequence_length""], image_h, image_w, image_c)\n\n episode_tensor = tf.io.decode_raw(example[""raw_video""], out_type=tf.uint8)\n episode_tensor = tf.reshape(episode_tensor, video_shape)\n\n episode_tensor = tf.ensure_shape(episode_tensor, [None, image_h, image_w, image_c])\n return episode_tensor\n\n\ndef get_dataloader(\n tfrecord_paths: list[str], # List of TFRecord file paths\n seq_len: int,\n global_batch_size: int,\n image_h: int,\n image_w: int,\n image_c: int,\n shuffle_buffer_size: int = 1000,\n num_parallel_calls: int = tf.data.AUTOTUNE,\n seed: int = 42,\n):\n """"""\n Creates a tf.data.Dataset pipeline from TFRecord files.\n """"""\n if not tfrecord_paths:\n raise ValueError(""tfrecord_paths list cannot be empty."")\n\n process_id = jax.process_index()\n num_processes = jax.process_count()\n\n assert (\n global_batch_size % num_processes == 0\n ), ""Global batch size {global_batch_size} \\n must be divisible by the number of JAX processes {num_processes} for proper sharding.""\n per_process_batch_size = global_batch_size // num_processes\n\n dataset = tf.data.TFRecordDataset(\n tfrecord_paths, num_parallel_reads=tf.data.AUTOTUNE\n )\n\n dataset = dataset.shard(num_shards=num_processes, index=process_id)\n\n # (f.srambical) NOTE: For TFRecords, it's often good to have a large shuffle buffer.\n if shuffle_buffer_size > 0:\n dataset = dataset.shuffle(\n buffer_size=shuffle_buffer_size, seed=seed, 
reshuffle_each_iteration=True\n )\n parse_fn = functools.partial(\n _parse_tfrecord_fn, image_h=image_h, image_w=image_w, image_c=image_c\n )\n dataset = dataset.map(parse_fn, num_parallel_calls=num_parallel_calls)\n\n tf_process_fn = functools.partial(\n _tf_process_episode,\n seq_len=seq_len,\n image_h=image_h,\n image_w=image_w,\n image_c=image_c,\n )\n dataset = dataset.map(tf_process_fn, num_parallel_calls=num_parallel_calls)\n\n dataset = dataset.repeat(None)\n dataset = dataset.batch(per_process_batch_size, drop_remainder=True)\n dataset = dataset.prefetch(tf.data.AUTOTUNE)\n\n return dataset.as_numpy_iterator()\n",python,tab
|
4 |
+
3,40,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
5 |
+
4,41,"utils/dataloader.py",0,0,"",python,tab
|
6 |
+
5,1469,"utils/dataloader.py",0,0,"",python,selection_command
|
7 |
+
6,3873,"train_tokenizer.py",0,0,"from dataclasses import dataclass, field\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n checkpoint: str = """"\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 3e-4\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_tokenizer""\n tags: list[str] = field(default_factory=lambda: [""tokenizer""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\[email protected]\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n 
print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(\n entity=args.entity,\n project=args.project,\n name=args.name,\n tags=args.tags,\n group=""debug"",\n config=args\n )\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer_<timestamp>_<step>\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout = jax.random.split(rng, 3)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(videos=videos, rng=_rng, dropout_rng=_rng_dropout)\n start_time = time.time()\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n elapsed_time = (time.time() - start_time) * 1000\n print(f""Step {step}, loss: {loss}, step time: {elapsed_time}ms"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n ""step_time_ms"": elapsed_time,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = 
inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""tokenizer_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab
|
8 |
+
7,5631,"models/tokenizer.py",0,0,"from typing import Dict, Any, Tuple\n\nimport flax.linen as nn\n\nfrom utils.preprocess import patchify, unpatchify\nfrom utils.nn import STTransformer, VectorQuantizer\n\n\nclass TokenizerVQVAE(nn.Module):\n """"""ST-ViVit VQ-VAE""""""\n\n in_dim: int\n model_dim: int\n latent_dim: int\n num_latents: int\n patch_size: int\n num_blocks: int\n num_heads: int\n dropout: float\n codebook_dropout: float\n\n def setup(self):\n self.encoder = STTransformer(\n self.model_dim,\n self.latent_dim,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n )\n self.vq = VectorQuantizer(\n self.latent_dim,\n self.num_latents,\n self.codebook_dropout,\n )\n self.out_dim = self.in_dim * self.patch_size**2\n self.decoder = STTransformer(\n self.model_dim,\n self.out_dim,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n H, W = batch[""videos""].shape[2:4]\n outputs = self.vq_encode(batch[""videos""], training)\n recon = self.decoder(outputs[""z_q""]) # (B, T, H_down * W_down, C)\n recon = nn.sigmoid(recon)\n outputs[""recon""] = unpatchify(recon, self.patch_size, H, W)\n return outputs\n\n def vq_encode(self, videos: Any, training: bool = True) -> Dict[str, Any]:\n # --- Preprocess + encode ---\n B, T = videos.shape[:2]\n x = patchify(videos, self.patch_size)\n N = x.shape[2]\n x = self.encoder(x) # (B, T, N, E)\n\n # --- Vector quantize ---\n x = x.reshape(B * T * N, self.latent_dim)\n z_q, z, emb, indices = self.vq(x, training)\n z_q = z_q.reshape(B, T, N, self.latent_dim)\n indices = indices.reshape(B, T, N)\n return dict(z_q=z_q, z=z, emb=emb, indices=indices)\n\n def decode(self, indices: Any, video_hw: Tuple[int, int]):\n z = self.vq.codebook[indices]\n recon = self.decoder(z)\n recon = nn.sigmoid(recon)\n return unpatchify(recon, self.patch_size, *video_hw)\n",python,tab
|
9 |
+
8,6632,"models/tokenizer.py",1009,0,"",python,selection_command
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-895267d6-5fbc-45e8-bc56-0d7c756881181750708632303-2025_06_23-12.57.13.921/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-8e0958c9-e396-41d9-b3d4-8a748cefa1701750701699946-2025_06_23-11.01.41.744/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-978ed4eb-d9c9-4380-b981-e501087459181750623968304-2025_06_22-13.26.11.394/source.csv
ADDED
@@ -0,0 +1,7 @@
1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
2 |
+
1,13,"utils/dataloader.py",0,0,"from cgi import test\nimport functools\nimport jax\n\nimport tensorflow as tf\n\n\n# --- TensorFlow function for processing: slicing, normalization ---\ndef _tf_process_episode(episode_tensor, seq_len, image_h, image_w, image_c):\n """"""\n Processes a raw episode tensor in TensorFlow.\n Takes a full episode, extracts a random sequence, and normalizes it.\n Args:\n episode_tensor: A TensorFlow tensor representing a full video episode.\n Expected shape: (dynamic_length, image_h, image_w, image_c)\n Expected dtype: e.g., tf.uint8 (raw pixel values)\n seq_len: The desired length of the sub-sequence to extract.\n image_h: The height of each frame.\n image_w: The width of each frame.\n image_c: The number of channels in each frame.\n Returns:\n A TensorFlow tensor representing the processed video sequence.\n Shape: (seq_len, image_h, image_w, image_c)\n Dtype: tf.float32 (normalized pixel values)\n """"""\n current_episode_len = tf.shape(episode_tensor)[0]\n\n max_start_idx = current_episode_len - seq_len\n\n start_idx = tf.random.uniform(\n shape=(), minval=0, maxval=max_start_idx + 1, dtype=tf.int32\n )\n\n seq = episode_tensor[start_idx : start_idx + seq_len]\n\n seq = tf.cast(seq, tf.float32) / 255.0\n\n # Ensure the final shape is statically known for batching.\n # tf.reshape is robust, but tf.ensure_shape or set_shape can also be used if confident.\n processed_sequence = tf.reshape(seq, [seq_len, image_h, image_w, image_c])\n\n return processed_sequence\n\n\ndef _parse_tfrecord_fn(example_proto, image_h, image_w, image_c):\n feature_description = {\n ""height"": tf.io.FixedLenFeature([], tf.int64),\n ""width"": tf.io.FixedLenFeature([], tf.int64),\n ""channels"": tf.io.FixedLenFeature([], tf.int64),\n ""sequence_length"": tf.io.FixedLenFeature([], tf.int64),\n ""raw_video"": tf.io.FixedLenFeature([], tf.string),\n }\n example = tf.io.parse_single_example(example_proto, feature_description)\n\n video_shape = (example[""sequence_length""], image_h, image_w, image_c)\n\n episode_tensor = tf.io.decode_raw(example[""raw_video""], out_type=tf.uint8)\n episode_tensor = tf.reshape(episode_tensor, video_shape)\n\n episode_tensor = tf.ensure_shape(episode_tensor, [None, image_h, image_w, image_c])\n return episode_tensor\n\n\ndef get_dataloader(\n tfrecord_paths: list[str], # List of TFRecord file paths\n seq_len: int,\n global_batch_size: int,\n image_h: int,\n image_w: int,\n image_c: int,\n shuffle_buffer_size: int = 1000,\n num_parallel_calls: int = tf.data.AUTOTUNE,\n cache_processed_data: bool = True,\n seed: int = 42,\n):\n """"""\n Creates a tf.data.Dataset pipeline from TFRecord files.\n """"""\n if not tfrecord_paths:\n raise ValueError(""tfrecord_paths list cannot be empty."")\n\n process_id = jax.process_index()\n num_processes = jax.process_count()\n\n assert global_batch_size % num_processes == 0, ""Global batch size {global_batch_size} \\n must be divisible by the number of JAX processes {num_processes} for proper sharding.""\n per_process_batch_size = global_batch_size // num_processes\n\n dataset = tf.data.TFRecordDataset(\n tfrecord_paths, num_parallel_reads=tf.data.AUTOTUNE\n )\n \n dataset = dataset.shard(num_shards=num_processes, index=process_id)\n\n # (f.srambical) NOTE: For TFRecords, it's often good to have a large shuffle buffer.\n if shuffle_buffer_size > 0:\n dataset = dataset.shuffle(\n buffer_size=shuffle_buffer_size, seed=seed, reshuffle_each_iteration=True\n )\n parse_fn = functools.partial(\n _parse_tfrecord_fn, image_h=image_h, 
image_w=image_w, image_c=image_c\n )\n dataset = dataset.map(parse_fn, num_parallel_calls=num_parallel_calls)\n\n dataset = dataset.cache() if cache_processed_data else dataset\n\n tf_process_fn = functools.partial(\n _tf_process_episode,\n seq_len=seq_len,\n image_h=image_h,\n image_w=image_w,\n image_c=image_c,\n )\n dataset = dataset.map(tf_process_fn, num_parallel_calls=num_parallel_calls)\n\n dataset = dataset.repeat(None)\n dataset = dataset.batch(per_process_batch_size, drop_remainder=True)\n dataset = dataset.prefetch(tf.data.AUTOTUNE)\n\n return dataset.as_numpy_iterator()\n",python,tab
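The sharding arithmetic in the recorded get_dataloader is worth spelling out; the numbers below are hypothetical:

# Hypothetical setup: global batch 48 split across 2 JAX processes.
global_batch_size, num_processes = 48, 2
assert global_batch_size % num_processes == 0
per_process_batch_size = global_batch_size // num_processes  # 24
# Each process's iterator yields arrays of shape (24, seq_len, H, W, C);
# train_tokenizer.py then assembles the global (48, ...) batch with
# jax.make_array_from_process_local_data.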
|
3 |
+
2,27,"tasks",0,0,"",Log,tab
|
4 |
+
3,36,"utils/dataloader.py",0,0,"",python,tab
|
5 |
+
4,56,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
6 |
+
5,869,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"1:26:11 PM [info] Activating crowd-code\n1:26:11 PM [info] Recording started\n",Log,content
|
7 |
+
6,39651,"utils/dataloader.py",0,0,"",python,tab
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-b00cd52f-686b-4cad-89ec-cf5dcdc287a11753702370531-2025_07_28-13.32.59.505/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-b08d92a3-9c0a-4526-b12f-c973e9c3c43f1752071802867-2025_07_09-16.36.43.962/source.csv
ADDED
@@ -0,0 +1,267 @@
1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
2 |
+
2,53,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"4:36:43 PM [info] Activating crowd-code\n4:36:43 PM [info] Recording started\n4:36:43 PM [info] Initializing git provider using file system watchers...\n4:36:43 PM [info] No workspace folder found\n",Log,tab
|
3 |
+
3,94480,"/Users/franzsrambical/.ssh/config",0,0,"Host login.haicore.berlin\n HostName login.haicore.berlin\n User franz.srambical\n\nHost hpc-build01.scidom.de\n HostName hpc-build01.scidom.de\n User franz.srambical\n\nHost horeka.scc.kit.edu\n HostName horeka.scc.kit.edu\n User tum_dbd0378\n\nHost juwels-cluster.fz-juelich.de\n HostName juwels-cluster.fz-juelich.de\n IdentityFile ~/.ssh/id_ed25519\n User srambical2\n\nHost hpc-submit02.scidom.de\n HostName hpc-submit02.scidom.de\n User franz.srambical\n\nHost hpc-submit01.scidom.de\n HostName hpc-submit01.scidom.de\n User franz.srambical",plaintext,tab
|
4 |
+
4,95685,"/Users/franzsrambical/.ssh/config",26,0,"",plaintext,selection_command
|
5 |
+
5,95929,"/Users/franzsrambical/.ssh/config",58,0,"",plaintext,selection_command
|
6 |
+
6,95947,"/Users/franzsrambical/.ssh/config",81,0,"",plaintext,selection_command
|
7 |
+
7,95981,"/Users/franzsrambical/.ssh/config",82,0,"",plaintext,selection_command
|
8 |
+
8,96015,"/Users/franzsrambical/.ssh/config",109,0,"",plaintext,selection_command
|
9 |
+
9,96048,"/Users/franzsrambical/.ssh/config",142,0,"",plaintext,selection_command
|
10 |
+
10,96082,"/Users/franzsrambical/.ssh/config",165,0,"",plaintext,selection_command
|
11 |
+
11,96117,"/Users/franzsrambical/.ssh/config",166,0,"",plaintext,selection_command
|
12 |
+
12,96150,"/Users/franzsrambical/.ssh/config",190,0,"",plaintext,selection_command
|
13 |
+
13,96184,"/Users/franzsrambical/.ssh/config",220,0,"",plaintext,selection_command
|
14 |
+
14,96221,"/Users/franzsrambical/.ssh/config",190,0,"",plaintext,selection_command
|
15 |
+
15,96485,"/Users/franzsrambical/.ssh/config",166,0,"",plaintext,selection_command
|
16 |
+
16,96514,"/Users/franzsrambical/.ssh/config",165,0,"",plaintext,selection_command
|
17 |
+
17,96547,"/Users/franzsrambical/.ssh/config",142,0,"",plaintext,selection_command
|
18 |
+
18,96575,"/Users/franzsrambical/.ssh/config",109,0,"",plaintext,selection_command
|
19 |
+
19,96607,"/Users/franzsrambical/.ssh/config",82,0,"",plaintext,selection_command
|
20 |
+
20,96638,"/Users/franzsrambical/.ssh/config",81,0,"",plaintext,selection_command
|
21 |
+
21,96671,"/Users/franzsrambical/.ssh/config",58,0,"",plaintext,selection_command
|
22 |
+
22,96705,"/Users/franzsrambical/.ssh/config",26,0,"",plaintext,selection_command
|
23 |
+
23,96739,"/Users/franzsrambical/.ssh/config",0,0,"",plaintext,selection_command
|
24 |
+
24,100915,"/Users/franzsrambical/.ssh/config",0,25,"Host login.haicore.berlin",plaintext,selection_command
|
25 |
+
25,101063,"/Users/franzsrambical/.ssh/config",0,57,"Host login.haicore.berlin\n HostName login.haicore.berlin",plaintext,selection_command
|
26 |
+
26,101231,"/Users/franzsrambical/.ssh/config",0,80,"Host login.haicore.berlin\n HostName login.haicore.berlin\n User franz.srambical",plaintext,selection_command
|
27 |
+
27,101369,"/Users/franzsrambical/.ssh/config",0,81,"Host login.haicore.berlin\n HostName login.haicore.berlin\n User franz.srambical\n",plaintext,selection_command
|
28 |
+
28,101616,"/Users/franzsrambical/.ssh/config",0,82,"",plaintext,content
|
29 |
+
29,106243,"/Users/franzsrambical/.ssh/config",0,0,"",plaintext,tab
|
30 |
+
30,106263,"/Users/franzsrambical/.ssh/config",454,0,"\n",plaintext,content
|
31 |
+
31,108988,"/Users/franzsrambical/.ssh/config",454,1,"",plaintext,content
|
32 |
+
32,112261,"/Users/franzsrambical/.ssh/config",0,0,"",plaintext,tab
|
33 |
+
33,112284,"/Users/franzsrambical/.ssh/config",454,0,"\n",plaintext,content
|
34 |
+
34,117860,"/Users/franzsrambical/.ssh/config",0,0,"",plaintext,tab
|
35 |
+
35,119181,"/Users/franzsrambical/.ssh/config",27,0,"",plaintext,selection_command
|
36 |
+
36,119428,"/Users/franzsrambical/.ssh/config",60,0,"",plaintext,selection_command
|
37 |
+
37,119459,"/Users/franzsrambical/.ssh/config",83,0,"",plaintext,selection_command
|
38 |
+
38,119491,"/Users/franzsrambical/.ssh/config",84,0,"",plaintext,selection_command
|
39 |
+
39,119521,"/Users/franzsrambical/.ssh/config",108,0,"",plaintext,selection_command
|
40 |
+
40,119553,"/Users/franzsrambical/.ssh/config",138,0,"",plaintext,selection_command
|
41 |
+
41,119587,"/Users/franzsrambical/.ssh/config",157,0,"",plaintext,selection_command
|
42 |
+
42,119621,"/Users/franzsrambical/.ssh/config",158,0,"",plaintext,selection_command
|
43 |
+
43,119654,"/Users/franzsrambical/.ssh/config",192,0,"",plaintext,selection_command
|
44 |
+
44,119688,"/Users/franzsrambical/.ssh/config",232,0,"",plaintext,selection_command
|
45 |
+
45,119721,"/Users/franzsrambical/.ssh/config",265,0,"",plaintext,selection_command
|
46 |
+
46,119755,"/Users/franzsrambical/.ssh/config",283,0,"",plaintext,selection_command
|
47 |
+
47,119788,"/Users/franzsrambical/.ssh/config",284,0,"",plaintext,selection_command
|
48 |
+
48,119821,"/Users/franzsrambical/.ssh/config",312,0,"",plaintext,selection_command
|
49 |
+
49,119855,"/Users/franzsrambical/.ssh/config",346,0,"",plaintext,selection_command
|
50 |
+
50,119889,"/Users/franzsrambical/.ssh/config",369,0,"",plaintext,selection_command
|
51 |
+
51,119922,"/Users/franzsrambical/.ssh/config",370,0,"",plaintext,selection_command
|
52 |
+
52,119958,"/Users/franzsrambical/.ssh/config",398,0,"",plaintext,selection_command
|
53 |
+
53,119991,"/Users/franzsrambical/.ssh/config",432,0,"",plaintext,selection_command
|
54 |
+
54,120022,"/Users/franzsrambical/.ssh/config",455,0,"",plaintext,selection_command
|
55 |
+
55,128496,"/Users/franzsrambical/.ssh/config",455,0,"\n",plaintext,content
|
56 |
+
56,128583,"/Users/franzsrambical/.ssh/config",456,0,"ssh [email protected]",plaintext,content
|
57 |
+
57,128584,"/Users/franzsrambical/.ssh/config",496,0,"",plaintext,selection_keyboard
|
58 |
+
58,130098,"/Users/franzsrambical/.ssh/config",495,0,"",plaintext,selection_command
|
59 |
+
59,131052,"/Users/franzsrambical/.ssh/config",456,40,"ssh [email protected]",plaintext,selection_command
|
60 |
+
60,132210,"/Users/franzsrambical/.ssh/config",495,0,"",plaintext,selection_command
|
61 |
+
61,132523,"/Users/franzsrambical/.ssh/config",455,0,"\n",plaintext,content
|
62 |
+
62,132854,"/Users/franzsrambical/.ssh/config",456,0,"H",plaintext,content
|
63 |
+
63,132856,"/Users/franzsrambical/.ssh/config",457,0,"",plaintext,selection_keyboard
|
64 |
+
64,133188,"/Users/franzsrambical/.ssh/config",457,0,"o",plaintext,content
|
65 |
+
65,133191,"/Users/franzsrambical/.ssh/config",458,0,"",plaintext,selection_keyboard
|
66 |
+
66,133287,"/Users/franzsrambical/.ssh/config",458,0,"s",plaintext,content
|
67 |
+
67,133290,"/Users/franzsrambical/.ssh/config",459,0,"",plaintext,selection_keyboard
|
68 |
+
68,133387,"/Users/franzsrambical/.ssh/config",459,0,"t",plaintext,content
|
69 |
+
69,133389,"/Users/franzsrambical/.ssh/config",460,0,"",plaintext,selection_keyboard
|
70 |
+
70,133432,"/Users/franzsrambical/.ssh/config",460,0," ",plaintext,content
|
71 |
+
71,133434,"/Users/franzsrambical/.ssh/config",461,0,"",plaintext,selection_keyboard
|
72 |
+
72,146263,"/Users/franzsrambical/.ssh/config",460,0,"",plaintext,selection_command
|
73 |
+
73,146405,"/Users/franzsrambical/.ssh/config",456,6,"",plaintext,content
|
74 |
+
74,146409,"/Users/franzsrambical/.ssh/config",495,0,"",plaintext,selection_command
|
75 |
+
75,146556,"/Users/franzsrambical/.ssh/config",455,41,"",plaintext,content
|
76 |
+
76,148933,"/Users/franzsrambical/.ssh/config",455,0,"\n",plaintext,content
|
77 |
+
77,149329,"/Users/franzsrambical/.ssh/config",456,0,"H",plaintext,content
|
78 |
+
78,149331,"/Users/franzsrambical/.ssh/config",457,0,"",plaintext,selection_keyboard
|
79 |
+
79,149492,"/Users/franzsrambical/.ssh/config",457,0,"o",plaintext,content
|
80 |
+
80,149495,"/Users/franzsrambical/.ssh/config",458,0,"",plaintext,selection_keyboard
|
81 |
+
81,149560,"/Users/franzsrambical/.ssh/config",458,0,"s",plaintext,content
|
82 |
+
82,149562,"/Users/franzsrambical/.ssh/config",459,0,"",plaintext,selection_keyboard
|
83 |
+
83,149610,"/Users/franzsrambical/.ssh/config",459,0,"t",plaintext,content
|
84 |
+
84,149612,"/Users/franzsrambical/.ssh/config",460,0,"",plaintext,selection_keyboard
|
85 |
+
85,149655,"/Users/franzsrambical/.ssh/config",460,0," ",plaintext,content
|
86 |
+
86,149657,"/Users/franzsrambical/.ssh/config",461,0,"",plaintext,selection_keyboard
|
87 |
+
87,153162,"/Users/franzsrambical/.ssh/config",461,0,"l",plaintext,content
|
88 |
+
88,153164,"/Users/franzsrambical/.ssh/config",462,0,"",plaintext,selection_keyboard
|
89 |
+
89,153299,"/Users/franzsrambical/.ssh/config",462,0,"o",plaintext,content
|
90 |
+
90,153301,"/Users/franzsrambical/.ssh/config",463,0,"",plaintext,selection_keyboard
|
91 |
+
91,153371,"/Users/franzsrambical/.ssh/config",463,0,"g",plaintext,content
|
92 |
+
92,153371,"/Users/franzsrambical/.ssh/config",464,0,"",plaintext,selection_keyboard
|
93 |
+
93,153460,"/Users/franzsrambical/.ssh/config",464,0,"i",plaintext,content
|
94 |
+
94,153461,"/Users/franzsrambical/.ssh/config",465,0,"",plaintext,selection_keyboard
|
95 |
+
95,153530,"/Users/franzsrambical/.ssh/config",465,0,"n",plaintext,content
|
96 |
+
96,153531,"/Users/franzsrambical/.ssh/config",466,0,"",plaintext,selection_keyboard
|
97 |
+
97,153785,"/Users/franzsrambical/.ssh/config",466,0,"@",plaintext,content
|
98 |
+
98,153786,"/Users/franzsrambical/.ssh/config",467,0,"",plaintext,selection_keyboard
|
99 |
+
99,154547,"/Users/franzsrambical/.ssh/config",466,1,"",plaintext,content
|
100 |
+
100,155018,"/Users/franzsrambical/.ssh/config",466,0,".",plaintext,content
|
101 |
+
101,155020,"/Users/franzsrambical/.ssh/config",467,0,"",plaintext,selection_keyboard
|
102 |
+
102,155700,"/Users/franzsrambical/.ssh/config",467,0,"h",plaintext,content
|
103 |
+
103,155701,"/Users/franzsrambical/.ssh/config",468,0,"",plaintext,selection_keyboard
|
104 |
+
104,155757,"/Users/franzsrambical/.ssh/config",468,0,"a",plaintext,content
|
105 |
+
105,155760,"/Users/franzsrambical/.ssh/config",469,0,"",plaintext,selection_keyboard
|
106 |
+
106,155841,"/Users/franzsrambical/.ssh/config",469,0,"i",plaintext,content
|
107 |
+
107,155843,"/Users/franzsrambical/.ssh/config",470,0,"",plaintext,selection_keyboard
|
108 |
+
108,155901,"/Users/franzsrambical/.ssh/config",470,0,"c",plaintext,content
|
109 |
+
109,155902,"/Users/franzsrambical/.ssh/config",471,0,"",plaintext,selection_keyboard
|
110 |
+
110,156007,"/Users/franzsrambical/.ssh/config",471,0,"o",plaintext,content
|
111 |
+
111,156008,"/Users/franzsrambical/.ssh/config",472,0,"",plaintext,selection_keyboard
|
112 |
+
112,156111,"/Users/franzsrambical/.ssh/config",472,0,"r",plaintext,content
|
113 |
+
113,156112,"/Users/franzsrambical/.ssh/config",473,0,"",plaintext,selection_keyboard
|
114 |
+
114,156168,"/Users/franzsrambical/.ssh/config",473,0,"e",plaintext,content
|
115 |
+
115,156169,"/Users/franzsrambical/.ssh/config",474,0,"",plaintext,selection_keyboard
|
116 |
+
116,156248,"/Users/franzsrambical/.ssh/config",474,0,".",plaintext,content
|
117 |
+
117,156249,"/Users/franzsrambical/.ssh/config",475,0,"",plaintext,selection_keyboard
|
118 |
+
118,156396,"/Users/franzsrambical/.ssh/config",475,0,"b",plaintext,content
|
119 |
+
119,156397,"/Users/franzsrambical/.ssh/config",476,0,"",plaintext,selection_keyboard
|
120 |
+
120,156584,"/Users/franzsrambical/.ssh/config",476,0,"e",plaintext,content
|
121 |
+
121,156586,"/Users/franzsrambical/.ssh/config",477,0,"",plaintext,selection_keyboard
|
122 |
+
122,156644,"/Users/franzsrambical/.ssh/config",477,0,"r",plaintext,content
|
123 |
+
123,156645,"/Users/franzsrambical/.ssh/config",478,0,"",plaintext,selection_keyboard
|
124 |
+
124,156709,"/Users/franzsrambical/.ssh/config",478,0,"l",plaintext,content
|
125 |
+
125,156710,"/Users/franzsrambical/.ssh/config",479,0,"",plaintext,selection_keyboard
|
126 |
+
126,156824,"/Users/franzsrambical/.ssh/config",479,0,"i",plaintext,content
|
127 |
+
127,156825,"/Users/franzsrambical/.ssh/config",480,0,"",plaintext,selection_keyboard
|
128 |
+
128,156896,"/Users/franzsrambical/.ssh/config",480,0,"n",plaintext,content
|
129 |
+
129,156896,"/Users/franzsrambical/.ssh/config",481,0,"",plaintext,selection_keyboard
|
130 |
+
130,157332,"/Users/franzsrambical/.ssh/config",481,0,"\n",plaintext,content
|
131 |
+
131,157969,"/Users/franzsrambical/.ssh/config",482,0," ",plaintext,content
|
132 |
+
132,158635,"/Users/franzsrambical/.ssh/config",484,0,"H",plaintext,content
|
133 |
+
133,158636,"/Users/franzsrambical/.ssh/config",485,0,"",plaintext,selection_keyboard
|
134 |
+
134,158766,"/Users/franzsrambical/.ssh/config",485,0,"o",plaintext,content
|
135 |
+
135,158768,"/Users/franzsrambical/.ssh/config",486,0,"",plaintext,selection_keyboard
|
136 |
+
136,158794,"/Users/franzsrambical/.ssh/config",486,0,"s",plaintext,content
|
137 |
+
137,158797,"/Users/franzsrambical/.ssh/config",487,0,"",plaintext,selection_keyboard
|
138 |
+
138,158853,"/Users/franzsrambical/.ssh/config",487,0,"t",plaintext,content
|
139 |
+
139,158855,"/Users/franzsrambical/.ssh/config",488,0,"",plaintext,selection_keyboard
|
140 |
+
140,159080,"/Users/franzsrambical/.ssh/config",488,0,"N",plaintext,content
|
141 |
+
141,159083,"/Users/franzsrambical/.ssh/config",489,0,"",plaintext,selection_keyboard
|
142 |
+
142,159245,"/Users/franzsrambical/.ssh/config",489,0,"a",plaintext,content
|
143 |
+
143,159247,"/Users/franzsrambical/.ssh/config",490,0,"",plaintext,selection_keyboard
|
144 |
+
144,159281,"/Users/franzsrambical/.ssh/config",490,0,"m",plaintext,content
|
145 |
+
145,159283,"/Users/franzsrambical/.ssh/config",491,0,"",plaintext,selection_keyboard
|
146 |
+
146,159432,"/Users/franzsrambical/.ssh/config",491,0,"e",plaintext,content
|
147 |
+
147,159434,"/Users/franzsrambical/.ssh/config",492,0,"",plaintext,selection_keyboard
|
148 |
+
148,164462,"/Users/franzsrambical/.ssh/config",492,0," ",plaintext,content
|
149 |
+
149,164464,"/Users/franzsrambical/.ssh/config",493,0,"",plaintext,selection_keyboard
|
150 |
+
150,165235,"/Users/franzsrambical/.ssh/config",493,0,"l",plaintext,content
|
151 |
+
151,165237,"/Users/franzsrambical/.ssh/config",494,0,"",plaintext,selection_keyboard
|
152 |
+
152,165387,"/Users/franzsrambical/.ssh/config",494,0,"o",plaintext,content
|
153 |
+
153,165391,"/Users/franzsrambical/.ssh/config",495,0,"",plaintext,selection_keyboard
|
154 |
+
154,165462,"/Users/franzsrambical/.ssh/config",495,0,"g",plaintext,content
|
155 |
+
155,165465,"/Users/franzsrambical/.ssh/config",496,0,"",plaintext,selection_keyboard
|
156 |
+
156,165536,"/Users/franzsrambical/.ssh/config",496,0,"i",plaintext,content
|
157 |
+
157,165538,"/Users/franzsrambical/.ssh/config",497,0,"",plaintext,selection_keyboard
|
158 |
+
158,165626,"/Users/franzsrambical/.ssh/config",497,0,"n",plaintext,content
|
159 |
+
159,165628,"/Users/franzsrambical/.ssh/config",498,0,"",plaintext,selection_keyboard
|
160 |
+
160,165774,"/Users/franzsrambical/.ssh/config",498,0,".",plaintext,content
|
161 |
+
161,165777,"/Users/franzsrambical/.ssh/config",499,0,"",plaintext,selection_keyboard
|
162 |
+
162,166016,"/Users/franzsrambical/.ssh/config",499,0,"h",plaintext,content
|
163 |
+
163,166018,"/Users/franzsrambical/.ssh/config",500,0,"",plaintext,selection_keyboard
|
164 |
+
164,166062,"/Users/franzsrambical/.ssh/config",500,0,"a",plaintext,content
|
165 |
+
165,166064,"/Users/franzsrambical/.ssh/config",501,0,"",plaintext,selection_keyboard
|
166 |
+
166,166165,"/Users/franzsrambical/.ssh/config",501,0,"i",plaintext,content
|
167 |
+
167,166167,"/Users/franzsrambical/.ssh/config",502,0,"",plaintext,selection_keyboard
|
168 |
+
168,166294,"/Users/franzsrambical/.ssh/config",502,0,"c",plaintext,content
|
169 |
+
169,166296,"/Users/franzsrambical/.ssh/config",503,0,"",plaintext,selection_keyboard
|
170 |
+
170,166400,"/Users/franzsrambical/.ssh/config",503,0,"o",plaintext,content
|
171 |
+
171,166402,"/Users/franzsrambical/.ssh/config",504,0,"",plaintext,selection_keyboard
|
172 |
+
172,166494,"/Users/franzsrambical/.ssh/config",504,0,"r",plaintext,content
|
173 |
+
173,166496,"/Users/franzsrambical/.ssh/config",505,0,"",plaintext,selection_keyboard
|
174 |
+
174,166569,"/Users/franzsrambical/.ssh/config",505,0,"e",plaintext,content
|
175 |
+
175,166571,"/Users/franzsrambical/.ssh/config",506,0,"",plaintext,selection_keyboard
|
176 |
+
176,166623,"/Users/franzsrambical/.ssh/config",506,0,".",plaintext,content
|
177 |
+
177,166625,"/Users/franzsrambical/.ssh/config",507,0,"",plaintext,selection_keyboard
|
178 |
+
178,166811,"/Users/franzsrambical/.ssh/config",507,0,"b",plaintext,content
|
179 |
+
179,166812,"/Users/franzsrambical/.ssh/config",508,0,"",plaintext,selection_keyboard
|
180 |
+
180,166871,"/Users/franzsrambical/.ssh/config",508,0,"e",plaintext,content
|
181 |
+
181,166873,"/Users/franzsrambical/.ssh/config",509,0,"",plaintext,selection_keyboard
|
182 |
+
182,166947,"/Users/franzsrambical/.ssh/config",509,0,"r",plaintext,content
|
183 |
+
183,166948,"/Users/franzsrambical/.ssh/config",510,0,"",plaintext,selection_keyboard
|
184 |
+
184,166988,"/Users/franzsrambical/.ssh/config",510,0,"l",plaintext,content
|
185 |
+
185,166989,"/Users/franzsrambical/.ssh/config",511,0,"",plaintext,selection_keyboard
|
186 |
+
186,167052,"/Users/franzsrambical/.ssh/config",511,0,"i",plaintext,content
|
187 |
+
187,167054,"/Users/franzsrambical/.ssh/config",512,0,"",plaintext,selection_keyboard
|
188 |
+
188,167115,"/Users/franzsrambical/.ssh/config",512,0,"n",plaintext,content
|
189 |
+
189,167117,"/Users/franzsrambical/.ssh/config",513,0,"",plaintext,selection_keyboard
|
190 |
+
190,167392,"/Users/franzsrambical/.ssh/config",507,6,"berlin",plaintext,content
|
191 |
+
191,167715,"/Users/franzsrambical/.ssh/config",513,0,"\n ",plaintext,content
|
192 |
+
192,169941,"/Users/franzsrambical/.ssh/config",516,0,"U",plaintext,content
|
193 |
+
193,169943,"/Users/franzsrambical/.ssh/config",517,0,"",plaintext,selection_keyboard
|
194 |
+
194,170103,"/Users/franzsrambical/.ssh/config",517,0,"s",plaintext,content
|
195 |
+
195,170105,"/Users/franzsrambical/.ssh/config",518,0,"",plaintext,selection_keyboard
|
196 |
+
196,170170,"/Users/franzsrambical/.ssh/config",518,0,"e",plaintext,content
|
197 |
+
197,170172,"/Users/franzsrambical/.ssh/config",519,0,"",plaintext,selection_keyboard
|
198 |
+
198,170250,"/Users/franzsrambical/.ssh/config",519,0,"r",plaintext,content
|
199 |
+
199,170251,"/Users/franzsrambical/.ssh/config",520,0,"",plaintext,selection_keyboard
|
200 |
+
200,171002,"/Users/franzsrambical/.ssh/config",520,0," ",plaintext,content
|
201 |
+
201,171005,"/Users/franzsrambical/.ssh/config",521,0,"",plaintext,selection_keyboard
|
202 |
+
202,171222,"/Users/franzsrambical/.ssh/config",521,0,"f",plaintext,content
|
203 |
+
203,171223,"/Users/franzsrambical/.ssh/config",522,0,"",plaintext,selection_keyboard
|
204 |
+
204,171374,"/Users/franzsrambical/.ssh/config",522,0,"r",plaintext,content
|
205 |
+
205,171379,"/Users/franzsrambical/.ssh/config",523,0,"",plaintext,selection_keyboard
|
206 |
+
206,171397,"/Users/franzsrambical/.ssh/config",523,0,"a",plaintext,content
|
207 |
+
207,171398,"/Users/franzsrambical/.ssh/config",524,0,"",plaintext,selection_keyboard
|
208 |
+
208,171500,"/Users/franzsrambical/.ssh/config",524,0,"n",plaintext,content
|
209 |
+
209,171501,"/Users/franzsrambical/.ssh/config",525,0,"",plaintext,selection_keyboard
|
210 |
+
210,171581,"/Users/franzsrambical/.ssh/config",525,0,"z",plaintext,content
|
211 |
+
211,171583,"/Users/franzsrambical/.ssh/config",526,0,"",plaintext,selection_keyboard
|
212 |
+
212,171700,"/Users/franzsrambical/.ssh/config",526,0,".",plaintext,content
|
213 |
+
213,171701,"/Users/franzsrambical/.ssh/config",527,0,"",plaintext,selection_keyboard
|
214 |
+
214,171772,"/Users/franzsrambical/.ssh/config",527,0,"s",plaintext,content
|
215 |
+
215,171773,"/Users/franzsrambical/.ssh/config",528,0,"",plaintext,selection_keyboard
|
216 |
+
216,171831,"/Users/franzsrambical/.ssh/config",528,0,"r",plaintext,content
|
217 |
+
217,171833,"/Users/franzsrambical/.ssh/config",529,0,"",plaintext,selection_keyboard
|
218 |
+
218,171914,"/Users/franzsrambical/.ssh/config",529,0,"a",plaintext,content
|
219 |
+
219,171916,"/Users/franzsrambical/.ssh/config",530,0,"",plaintext,selection_keyboard
|
220 |
+
220,171943,"/Users/franzsrambical/.ssh/config",530,0,"m",plaintext,content
|
221 |
+
221,171945,"/Users/franzsrambical/.ssh/config",531,0,"",plaintext,selection_keyboard
|
222 |
+
222,172140,"/Users/franzsrambical/.ssh/config",531,0,"b",plaintext,content
|
223 |
+
223,172143,"/Users/franzsrambical/.ssh/config",532,0,"",plaintext,selection_keyboard
|
224 |
+
224,172186,"/Users/franzsrambical/.ssh/config",532,0,"i",plaintext,content
|
225 |
+
225,172188,"/Users/franzsrambical/.ssh/config",533,0,"",plaintext,selection_keyboard
|
226 |
+
226,172245,"/Users/franzsrambical/.ssh/config",533,0,"c",plaintext,content
|
227 |
+
227,172247,"/Users/franzsrambical/.ssh/config",534,0,"",plaintext,selection_keyboard
|
228 |
+
228,172289,"/Users/franzsrambical/.ssh/config",534,0,"a",plaintext,content
|
229 |
+
229,172291,"/Users/franzsrambical/.ssh/config",535,0,"",plaintext,selection_keyboard
|
230 |
+
230,172390,"/Users/franzsrambical/.ssh/config",535,0,"l",plaintext,content
|
231 |
+
231,172391,"/Users/franzsrambical/.ssh/config",536,0,"",plaintext,selection_keyboard
|
232 |
+
232,172556,"/Users/franzsrambical/.ssh/config",535,0,"",plaintext,selection_command
|
233 |
+
233,172719,"/Users/franzsrambical/.ssh/config",503,0,"",plaintext,selection_command
|
234 |
+
234,172910,"/Users/franzsrambical/.ssh/config",513,0,"\n ",plaintext,content
|
235 |
+
235,173988,"/Users/franzsrambical/.ssh/config",516,0,"I",plaintext,content
|
236 |
+
236,173989,"/Users/franzsrambical/.ssh/config",517,0,"",plaintext,selection_keyboard
|
237 |
+
237,174082,"/Users/franzsrambical/.ssh/config",517,0,"d",plaintext,content
|
238 |
+
238,174084,"/Users/franzsrambical/.ssh/config",518,0,"",plaintext,selection_keyboard
|
239 |
+
239,174148,"/Users/franzsrambical/.ssh/config",518,0,"e",plaintext,content
|
240 |
+
240,174150,"/Users/franzsrambical/.ssh/config",519,0,"",plaintext,selection_keyboard
|
241 |
+
241,174271,"/Users/franzsrambical/.ssh/config",519,0,"n",plaintext,content
|
242 |
+
242,174273,"/Users/franzsrambical/.ssh/config",520,0,"",plaintext,selection_keyboard
|
243 |
+
243,174746,"/Users/franzsrambical/.ssh/config",516,4,"IdentityFile",plaintext,content
|
244 |
+
244,175993,"/Users/franzsrambical/.ssh/config",528,0," ",plaintext,content
|
245 |
+
245,175995,"/Users/franzsrambical/.ssh/config",529,0,"",plaintext,selection_keyboard
|
246 |
+
246,176464,"/Users/franzsrambical/.ssh/config",529,0,"~",plaintext,content
|
247 |
+
247,176467,"/Users/franzsrambical/.ssh/config",530,0,"",plaintext,selection_keyboard
|
248 |
+
248,177648,"/Users/franzsrambical/.ssh/config",530,0,"/",plaintext,content
|
249 |
+
249,177649,"/Users/franzsrambical/.ssh/config",531,0,"",plaintext,selection_keyboard
|
250 |
+
250,177830,"/Users/franzsrambical/.ssh/config",531,0,".",plaintext,content
|
251 |
+
251,177832,"/Users/franzsrambical/.ssh/config",532,0,"",plaintext,selection_keyboard
|
252 |
+
252,177940,"/Users/franzsrambical/.ssh/config",532,0,"s",plaintext,content
|
253 |
+
253,177942,"/Users/franzsrambical/.ssh/config",533,0,"",plaintext,selection_keyboard
|
254 |
+
254,178078,"/Users/franzsrambical/.ssh/config",533,0,"s",plaintext,content
|
255 |
+
255,178081,"/Users/franzsrambical/.ssh/config",534,0,"",plaintext,selection_keyboard
|
256 |
+
256,178173,"/Users/franzsrambical/.ssh/config",534,0,"h",plaintext,content
|
257 |
+
257,178175,"/Users/franzsrambical/.ssh/config",535,0,"",plaintext,selection_keyboard
|
258 |
+
258,179348,"/Users/franzsrambical/.ssh/config",535,0,"/",plaintext,content
|
259 |
+
259,179351,"/Users/franzsrambical/.ssh/config",536,0,"",plaintext,selection_keyboard
|
260 |
+
260,179561,"/Users/franzsrambical/.ssh/config",536,0,"i",plaintext,content
|
261 |
+
261,179563,"/Users/franzsrambical/.ssh/config",537,0,"",plaintext,selection_keyboard
|
262 |
+
262,179646,"/Users/franzsrambical/.ssh/config",537,0,"d",plaintext,content
|
263 |
+
263,179648,"/Users/franzsrambical/.ssh/config",538,0,"",plaintext,selection_keyboard
|
264 |
+
264,179919,"/Users/franzsrambical/.ssh/config",538,0,"_",plaintext,content
|
265 |
+
265,179922,"/Users/franzsrambical/.ssh/config",539,0,"",plaintext,selection_keyboard
|
266 |
+
266,180693,"/Users/franzsrambical/.ssh/config",536,3,"id_ed25519",plaintext,content
|
267 |
+
267,181818,"/Users/franzsrambical/.ssh/config",545,0,"",plaintext,selection_command
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-b3271939-bd4f-497b-b876-5ea890ece75f1750632226677-2025_06_22-15.43.48.822/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-d413e23e-d7da-4c64-9e15-0b0c0e6031031751383188198-2025_07_01-17.19.54.522/source.csv
ADDED
@@ -0,0 +1,5 @@
1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
2 |
+
1,2,"big_vision/input_pipeline.py",0,0,"# Copyright 2024 Big Vision Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the ""License"");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an ""AS IS"" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n""""""ImageNet input pipeline.""""""\nimport collections\nimport functools\nimport itertools\nimport math\nimport multiprocessing.pool\n\nfrom absl import logging\nfrom big_vision.datasets import sequence_packing\nimport big_vision.datasets.core as ds_core\nimport big_vision.pp.builder as pp_builder\nimport big_vision.utils as u\nimport einops\nimport jax\nimport numpy as np\nimport tensorflow as tf\n\n\nDEFAULT_NUM_PARALLEL_CALLS = 100\n\n\ndef make_for_train(\n data, preprocess_fn, batch_size,\n shuffle_buffer_size=None, cache_raw=False,\n num_parallel_calls=DEFAULT_NUM_PARALLEL_CALLS, prefetch=2,\n *,\n pre_filter_fn=None, post_filter_fn=None,\n pack=None, skip_errors=False,\n):\n """"""Makes an input pipeline for training.""""""\n # Use data filtering at your own risk: the actual split sizes won't be known\n # in advance, so epoch-based things won't work correctly.\n\n data = _add_tpu_host_options(data)\n\n data = data.filter(pre_filter_fn) if pre_filter_fn else data\n data = data.cache() if cache_raw else data\n\n # First shuffle and then repeat (each with a different shuffle). This way\n # the data for one epoch is all seen before the next one is processed and\n # significantly affects the number of times each example is seen when\n # processing for small number of epochs.\n if shuffle_buffer_size:\n data = data.shuffle(shuffle_buffer_size, reshuffle_each_iteration=True)\n data = data.repeat(None)\n\n data = data.map(preprocess_fn, num_parallel_calls=num_parallel_calls)\n data = data.filter(post_filter_fn) if post_filter_fn else data\n\n data = data.ignore_errors(log_warning=True) if skip_errors else data\n\n if pack:\n data = sequence_packing.pack_dataset(\n data,\n batch_size // jax.process_count() if batch_size else None,\n pack.to_dict())\n\n # Drop remainder makes shape fully static, so we can later use it if needed.\n if batch_size:\n data = data.batch(batch_size // jax.process_count(), drop_remainder=True)\n if prefetch: # None means autotune, but we never want that.\n data = data.prefetch(prefetch)\n return data\n\n\ndef training(input_config):\n """"""Reads the data from a single dataset, or mixes it from multiple.\n\n The data is read either from one or mixed from multiple datasets, depending\n on the `input_config`.\n\n Args:\n input_config: Configures the input pipeline. 
See input_pipeline_test for\n examples.\n\n Returns:\n A tuple containing (possibly mixed) tf.data.Dataset and a total number of\n training examples.\n """"""\n per_pipeline_configs = (\n ""shuffle_buffer_size"", ""cache_raw"", ""num_parallel_calls"",\n ""pre_filter_fn"", ""post_filter_fn"", ""pack"", ""skip_errors"")\n def config_to_kw(config):\n assert ""filter_fn"" not in config, ""Deprecated; use `pre_filter_fn` instead.""\n return {k: config[k] for k in per_pipeline_configs if k in config}\n\n batch_size = input_config.batch_size\n # Handle separately the common case when no mixing happens.\n if isinstance(input_config.data.get(""name""), str):\n train_data = ds_core.get(**input_config.data)\n train_ds = make_for_train(\n data=train_data.get_tfdata(ordered=False,\n **input_config.get(""tfdata"", {})),\n batch_size=batch_size,\n preprocess_fn=pp_builder.get_preprocess_fn(input_config.get(""pp"")),\n prefetch=input_config.get(""prefetch"", 2), # Default 2 for bwd compat.\n **config_to_kw(input_config)\n )\n return train_ds, train_data.total_examples\n\n # A helpful error instead of silent ignore:\n for k in per_pipeline_configs:\n assert k not in input_config, f""{k} is per-dataset in multi-input.""\n\n # Parallelize the loading of datasets when doing data mixture.\n # For larger mixes, we sometimes spend >5min when doing sequentially.\n # NOTE: functools.cache is thread-safe.\n def _make(name_and_weight):\n name, weight = name_and_weight\n dataset = input_config[name]\n train_data = ds_core.get(**dataset.data)\n dataset = make_for_train(\n data=train_data.get_tfdata(ordered=False, **dataset.get(""tfdata"", {})),\n # Don't batch the data just yet, it will be done after\n # mixing the different datasets below.\n batch_size=None,\n preprocess_fn=pp_builder.get_preprocess_fn(dataset.get(""pp""), name),\n prefetch=0, # Prefetching each pipeline leads to huge OOMs.\n **config_to_kw(dataset)\n )\n if keys := input_config.get(""keep_only""):\n dataset = dataset.map(lambda d, keys=keys: {k: d[k] for k in keys})\n return name, dataset, weight, train_data.total_examples\n\n names, datasets, weights, totals = [], [], [], []\n pool = multiprocessing.pool.ThreadPool(\n input_config.get(""thread_pool_size"", len(input_config.data))\n )\n for name, dataset, weight, total in pool.map(\n # Skip weight=0 datasets as a convenient optimization in sweeps.\n _make, ((name, w) for name, w in input_config.data.items() if w)):\n names.append(name)\n datasets.append(dataset)\n weights.append(weight)\n totals.append(total)\n\n # Normalize the weights such that they sum up to 1.\n weights = [x / sum(weights) for x in weights]\n\n logging.info(\n ""NOTE: Total dataset mix size: %d\nContributions:\n%s"", sum(totals),\n ""\n"".join(f""{ds}: {n} ({w * 100:.2g}%)""\n for ds, n, w in zip(names, totals, weights))\n )\n\n train_ds = tf.data.Dataset.sample_from_datasets(\n datasets, weights, stop_on_empty_dataset=True)\n if input_config.get(""pack""):\n train_ds = sequence_packing.pack_dataset(\n train_ds,\n input_config[""batch_size""] // jax.process_count(),\n input_config.pack.to_dict())\n\n train_ds = train_ds.batch(\n input_config[""batch_size""] // jax.process_count(), drop_remainder=True)\n if (pf := input_config.get(""prefetch"", 2)):\n train_ds = train_ds.prefetch(pf)\n\n return train_ds, sum(totals)\n\n\n# The pipeline below is used for evals in multi-{G,T}PU and multi-host settings.\n# As the total number of examples may not be evenly divisible accross all\n# devices, we use the `infinite tf.data padding` trick, 
which was suggested by\n# Andreas Steiner and also implemented by him in the clu library:\n# https://github.com/google/CommonLoopUtils/blob/84b777c42dfd3fb6685537138433bfeb5241a006/clu/deterministic_data.py#L304.\ndef make_for_inference(\n data, preprocess_fn, batch_size, num_ex_per_process,\n cache_raw=False, cache_final=False,\n num_parallel_calls=DEFAULT_NUM_PARALLEL_CALLS, prefetch=1,\n):\n """"""Makes an input pipeline for inference.""""""\n\n data = _add_tpu_host_options(data)\n data = data.cache() if cache_raw else data\n data = data.map(_add_internal_fields(preprocess_fn),\n num_parallel_calls=num_parallel_calls)\n data = data.concatenate(_get_pad_data(data))\n\n local_batch_size = batch_size // jax.process_count()\n # This is just like `batch`, but allows batching elements of different shapes\n # into a tf.RaggedTensor. Elements of the same fixed shape remain tf.Tensors.\n # Since we do 'infinite' padding it is safe to drop the remainder.\n data = data.ragged_batch(batch_size=local_batch_size, drop_remainder=True)\n\n # We need to make sure that all hosts process all data and exactly the same\n # number of batches. Below we take max per-host num examples and use it on all\n # hosts to derive the number of batches.\n num_batches = math.ceil(max(num_ex_per_process) / local_batch_size)\n data = data.take(num_batches)\n\n # Note we cache data after a finite number of batches is taken.\n data = data.cache() if cache_final else data\n data = data.repeat()\n data = data.prefetch(prefetch) if prefetch else data\n return data, num_batches\n\n\ndef _get_pad_data(data):\n def zeros_like_spec(spec):\n # For unknown/flexible dimensions (None), just use 0 instead.\n return tf.zeros([x or 0 for x in spec.shape], spec.dtype)\n\n zero = jax.tree.map(zeros_like_spec, data.element_spec)\n return tf.data.Dataset.from_tensors(zero).repeat()\n\n\ndef _add_internal_fields(pp_fn):\n """"""Wraps pp_fn to add _mask and _id keys.""""""\n # Adds internal keys, that we either, in this order of preference:\n # 1. keep from result of pp_fn,\n # 2. carry over from raw (not pp_fn'd) example, or\n # 3. add, if that makes sense.\n def _pp_fn(example):\n result = pp_fn(example)\n # _mask will be False on padded examples (see _get_pad_data).\n result.setdefault(""_mask"", example.get(""_mask"", tf.constant(True)))\n # Not all data-sources can provide an ID. Only carry-over if it can:\n if ""_id"" in example and ""_id"" not in result:\n result[""_id""] = example[""_id""]\n return result\n return _pp_fn\n\n\ndef _add_tpu_host_options(data):\n options = tf.data.Options()\n options.threading.private_threadpool_size = 48\n options.threading.max_intra_op_parallelism = 1\n\n # Stop a whole bunch of magic stuff that eats up all RAM:\n options.experimental_optimization.inject_prefetch = False\n\n return data.with_options(options)\n\n\ndef prefetch_iterator(it, n):\n """"""Runs iterator `it` ahead for `n` steps. 
Adapted from flax.""""""\n if not n:\n yield from it\n return\n queue = collections.deque()\n\n def enqueue(n_steps): # Enqueues *up to* `n` elements from the iterator.\n for data in itertools.islice(it, n_steps):\n # Prefetching will parallelize any processing that happens in a different\n # thread (like `jax.device_put()`), but it will be of no use for\n # processing that happens in the same thread.\n queue.append(data)\n\n enqueue(n) # Fill up the buffer.\n while queue:\n yield queue.popleft()\n enqueue(1)\n\n\ndef threadstart_iterator(it):\n """"""Starts an iterator right away in a background thread.""""""\n # We already want to ""start"" the iterator in order to start the underlying\n # dataset prefetch mechanisms, so here we get the first element. But we don't\n # want to lose it from training, so we yield that one afterwards.\n # (internal link)\n pool = multiprocessing.pool.ThreadPool(processes=1)\n first_ex_promise = pool.apply_async(lambda: next(it))\n\n yield first_ex_promise.get()\n yield from it\n\n\ndef tf_to_numpy(x):\n """"""Convert any TF types to numpy.""""""\n if isinstance(x, tf.Tensor):\n if x.dtype != tf.string: # Dense, non-string tensor? Easy!\n return x.numpy()\n else: # A dense string tensor? Turn into actual strings, not bytes.\n return np.vectorize(bytes.decode, otypes=[str])(x.numpy())\n\n # The rest deals with RaggedTensors, for two main reasons:\n # - For strings, recursively apply the above conversion\n # - For common cases (eg batch of images), return more reasonable shapes.\n\n # Replace all None's in the shape by a fixed number, in the (somewhat common)\n # case that they are marked ragged, but really all have the same shape.\n real_shape = list(x.shape)\n for i, s in enumerate(real_shape[1:]):\n if s is not None: continue\n rowlens = np.diff(x.nested_row_splits[i])\n if len(set(rowlens)) == 1:\n real_shape[i + 1] = rowlens[0]\n\n if None not in real_shape:\n return tf_to_numpy(x.flat_values).reshape(real_shape)\n\n # It's actually ragged, reconstruct the array from the variable length pieces.\n splits = x.row_splits.numpy()\n rows = [tf_to_numpy(x.values[splits[i]:splits[i + 1]])\n for i in range(len(splits) - 1)]\n return np.fromiter(rows, dtype=object)\n\n\n# Note that the order of global devices for sharding data is important and\n# should be compatible with device order used for models params, state, etc.\ndef start_global(\n data, global_devices, n_prefetch=1, keep_on_cpu=frozenset(), warmup=False):\n """"""Starts the global input pipeline.""""""\n def maybe_shard(name, x):\n if name in keep_on_cpu:\n return tf_to_numpy(x)\n return u.make_fsarray_from_local_slice(x, global_devices)\n\n it = iter(data)\n if warmup: # actually pre-fill shuffle buffers etc.\n it = threadstart_iterator(it)\n\n it = (u.tree_map_with_names(maybe_shard, elem) for elem in it)\n return prefetch_iterator(it, n_prefetch)\n\n\n##########################################################################\n# The code below is pmap-specific and is deprecated, please switch to jit.\n##########################################################################\n\n\ndef shard_and_put(x, shard=True, put=True):\n x = np.asarray(memoryview(x)) # No-copy conversion: http://(internal link)\n if shard:\n x = einops.rearrange(x, ""(d l) ... 
-> d l ..."", d=jax.local_device_count())\n if shard and put: # Only works for pmap (for now).\n x = jax.device_put_sharded(list(x), jax.local_devices())\n return x\n\n\ndef start_input_pipeline(data, n_prefetch=1, shard=True):\n fn = functools.partial(shard_and_put, shard=shard, put=n_prefetch)\n it = (jax.tree.map(fn, elem) for elem in iter(data))\n return prefetch_iterator(it, n_prefetch)\n\n\ndef start_ragged_input_pipeline(data, n_prefetch=1, shard=True, ragged=None):\n def maybe_shard_and_put(name, x):\n return x if name in (ragged or {}) else shard_and_put(x, shard)\n\n it = (u.tree_map_with_names(maybe_shard_and_put, elem) for elem in iter(data))\n return prefetch_iterator(it, n_prefetch)\n",python,tab
|
3 |
+
2,53,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
4 |
+
3,1439,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"5:19:54 PM [info] Activating crowd-code\n5:19:54 PM [info] Recording started\n5:19:54 PM [info] Initializing git provider using file system watchers...\n5:19:54 PM [info] Git repository found\n5:19:54 PM [info] Git provider initialized successfully\n",Log,content
|
5 |
+
4,1537,"extension-output-pdoom-org.crowd-code-#1-crowd-code",245,0,"5:19:55 PM [info] Initial git state: [object Object]\n",Log,content
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-d9cdf338-0ddd-4679-853a-6d7bdf2b18581751046137722-2025_06_27-10.42.19.354/source.csv
ADDED
@@ -0,0 +1,167 @@
1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
2 |
+
1,1,"utils/dataloader.py",0,0,"import functools\nimport jax\n\nimport tensorflow as tf\n\n# reserve GPU memory for JAX only if tensorflow is built with GPU support\ntf.config.experimental.set_visible_devices([], ""GPU"")\n\n\n# --- TensorFlow function for processing: slicing, normalization ---\ndef _tf_process_episode(episode_tensor, seq_len, image_h, image_w, image_c):\n """"""\n Processes a raw episode tensor in TensorFlow.\n Takes a full episode, extracts a random sequence, and normalizes it.\n Args:\n episode_tensor: A TensorFlow tensor representing a full video episode.\n Expected shape: (dynamic_length, image_h, image_w, image_c)\n Expected dtype: e.g., tf.uint8 (raw pixel values)\n seq_len: The desired length of the sub-sequence to extract.\n image_h: The height of each frame.\n image_w: The width of each frame.\n image_c: The number of channels in each frame.\n Returns:\n A TensorFlow tensor representing the processed video sequence.\n Shape: (seq_len, image_h, image_w, image_c)\n Dtype: tf.float32 (normalized pixel values)\n """"""\n current_episode_len = tf.shape(episode_tensor)[0]\n\n max_start_idx = current_episode_len - seq_len\n\n start_idx = tf.random.uniform(\n shape=(), minval=0, maxval=max_start_idx + 1, dtype=tf.int32\n )\n\n seq = episode_tensor[start_idx : start_idx + seq_len]\n\n seq = tf.cast(seq, tf.float32) / 255.0\n\n # Ensure the final shape is statically known for batching.\n # tf.reshape is robust, but tf.ensure_shape or set_shape can also be used if confident.\n processed_sequence = tf.reshape(seq, [seq_len, image_h, image_w, image_c])\n\n return processed_sequence\n\n\ndef _parse_tfrecord_fn(example_proto, image_h, image_w, image_c):\n feature_description = {\n ""height"": tf.io.FixedLenFeature([], tf.int64),\n ""width"": tf.io.FixedLenFeature([], tf.int64),\n ""channels"": tf.io.FixedLenFeature([], tf.int64),\n ""sequence_length"": tf.io.FixedLenFeature([], tf.int64),\n ""raw_video"": tf.io.FixedLenFeature([], tf.string),\n }\n example = tf.io.parse_single_example(example_proto, feature_description)\n\n video_shape = (example[""sequence_length""], image_h, image_w, image_c)\n\n episode_tensor = tf.io.decode_raw(example[""raw_video""], out_type=tf.uint8)\n episode_tensor = tf.reshape(episode_tensor, video_shape)\n\n episode_tensor = tf.ensure_shape(episode_tensor, [None, image_h, image_w, image_c])\n return episode_tensor\n\n\ndef get_dataloader(\n tfrecord_paths: list[str], # List of TFRecord file paths\n seq_len: int,\n global_batch_size: int,\n image_h: int,\n image_w: int,\n image_c: int,\n shuffle_buffer_size: int = 1000,\n num_parallel_calls: int = tf.data.AUTOTUNE,\n seed: int = 42,\n):\n """"""\n Creates a tf.data.Dataset pipeline from TFRecord files.\n """"""\n if not tfrecord_paths:\n raise ValueError(""tfrecord_paths list cannot be empty."")\n\n process_id = jax.process_index()\n num_processes = jax.process_count()\n\n assert (\n global_batch_size % num_processes == 0\n ), ""Global batch size {global_batch_size} \\n must be divisible by the number of JAX processes {num_processes} for proper sharding.""\n per_process_batch_size = global_batch_size // num_processes\n\n dataset = tf.data.TFRecordDataset(\n tfrecord_paths, num_parallel_reads=tf.data.AUTOTUNE\n )\n\n dataset = dataset.shard(num_shards=num_processes, index=process_id)\n\n # (f.srambical) NOTE: For TFRecords, it's often good to have a large shuffle buffer.\n if shuffle_buffer_size > 0:\n dataset = dataset.shuffle(\n buffer_size=shuffle_buffer_size, seed=seed, 
reshuffle_each_iteration=True\n )\n parse_fn = functools.partial(\n _parse_tfrecord_fn, image_h=image_h, image_w=image_w, image_c=image_c\n )\n dataset = dataset.map(parse_fn, num_parallel_calls=num_parallel_calls)\n\n tf_process_fn = functools.partial(\n _tf_process_episode,\n seq_len=seq_len,\n image_h=image_h,\n image_w=image_w,\n image_c=image_c,\n )\n dataset = dataset.map(tf_process_fn, num_parallel_calls=num_parallel_calls)\n\n dataset = dataset.repeat(None)\n dataset = dataset.batch(per_process_batch_size, drop_remainder=True)\n dataset = dataset.prefetch(tf.data.AUTOTUNE)\n\n return dataset.as_numpy_iterator()\n",python,tab
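The functional change relative to the earlier recording of utils/dataloader.py is the set_visible_devices call at import time; a minimal sketch of its effect:

import tensorflow as tf

# Hide all GPUs from TensorFlow so tf.data stays on the CPU and JAX keeps
# the accelerator memory to itself.
tf.config.experimental.set_visible_devices([], "GPU")
assert tf.config.get_visible_devices("GPU") == []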
|
3 |
+
2,38,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
4 |
+
3,67,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"10:42:19 AM [info] Activating crowd-code\n10:42:19 AM [info] Recording started\n10:42:19 AM [info] Initializing git provider using file system watchers...\n10:42:19 AM [info] Git repository found\n10:42:19 AM [info] Git provider initialized successfully\n10:42:19 AM [info] Initial git state: [object Object]\n",Log,content
|
5 |
+
4,905,"utils/dataloader.py",0,0,"",python,tab
|
6 |
+
5,19039,"utils/dataloader.py",2752,0,"",python,selection_command
|
7 |
+
6,19130,"utils/dataloader.py",2715,0,"",python,selection_command
|
8 |
+
7,30009,"models/dynamics.py",0,0,"from typing import Dict, Any\n\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\n\nfrom utils.nn import STTransformer\n\n\nclass DynamicsMaskGIT(nn.Module):\n """"""MaskGIT dynamics model""""""\n\n model_dim: int\n num_latents: int\n num_blocks: int\n num_heads: int\n dropout: float\n mask_limit: float\n\n def setup(self):\n self.dynamics = STTransformer(\n self.model_dim,\n self.num_latents,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n )\n self.patch_embed = nn.Embed(self.num_latents, self.model_dim)\n self.mask_token = self.param(\n ""mask_token"",\n nn.initializers.lecun_uniform(),\n (1, 1, 1, self.model_dim),\n )\n self.action_up = nn.Dense(self.model_dim)\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n # --- Mask videos ---\n vid_embed = self.patch_embed(batch[""video_tokens""])\n if training:\n rng1, rng2 = jax.random.split(batch[""mask_rng""])\n mask_prob = jax.random.uniform(rng1, minval=self.mask_limit)\n mask = jax.random.bernoulli(rng2, mask_prob, vid_embed.shape[:-1])\n mask = mask.at[:, 0].set(False)\n vid_embed = jnp.where(jnp.expand_dims(mask, -1), self.mask_token, vid_embed)\n else:\n mask = None\n\n # --- Predict transition ---\n act_embed = self.action_up(batch[""latent_actions""])\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n logits = self.dynamics(vid_embed)\n return dict(token_logits=logits, mask=mask)\n",python,tab
|
9 |
+
8,30937,"models/dynamics.py",416,0,"",python,selection_command
|
10 |
+
9,31509,"models/dynamics.py",1359,0,"",python,selection_command
|
11 |
+
10,31790,"models/dynamics.py",1655,0,"",python,selection_command
|
12 |
+
11,32552,"models/dynamics.py",753,0,"",python,selection_command
|
13 |
+
12,32710,"models/dynamics.py",165,0,"",python,selection_command
|
14 |
+
13,32898,"models/dynamics.py",0,0,"",python,selection_command
|
15 |
+
14,35425,"train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom genie import Genie, restore_genie_components\nfrom models.tokenizer import TokenizerVQVAE\nfrom models.lam import LatentActionModel\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n # Optimization\n batch_size: int = 36\n min_lr: float = 3e-6\n max_lr: float = 3e-5\n warmup_steps: int = 5000\n # Tokenizer\n tokenizer_dim: int = 512\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 8\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 8\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_dim: int = 512\n dyna_num_blocks: int = 12\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(params, state, inputs):\n """"""Compute masked dynamics loss""""""\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mask = outputs[""mask""]\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\[email protected]\ndef train_step(state, inputs):\n """"""Update state and compute metrics""""""\n grad_fn = jax.value_and_grad(dynamics_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices 
!= 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(\n entity=args.entity,\n project=args.project,\n name=args.name,\n tags=args.tags,\n group=""debug"",\n config=args\n )\n\n # --- Initialize model ---\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n dummy_inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n action=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len), dtype=jnp.float32\n ),\n mask_rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = genie.init(_rng, dummy_inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=genie.apply, params=init_params, tx=tx)\n\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Restore checkpoint ---\n train_state = restore_genie_components(\n train_state, replicated_sharding, dummy_inputs, rng, args\n )\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n )\n step = 0\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout, _rng_mask = jax.random.split(rng, 4)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(\n videos=videos,\n rng=_rng,\n dropout_rng=_rng_dropout,\n mask_rng=_rng_mask,\n )\n start_time = time.time()\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n elapsed_time = (time.time() - start_time) * 1000\n print(f""Step {step}, loss: {loss}, step time: {elapsed_time}ms"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n ""step_time_ms"": elapsed_time,\n **metrics,\n }\n )\n 
if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""genie_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab
|
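dynamics_loss_fn above averages cross-entropy and accuracy over masked positions only, so unmasked tokens contribute nothing to the loss. A minimal standalone sketch of that reduction (shapes assumed):

import jax.numpy as jnp
import optax

def masked_ce(logits, labels, mask):
    # logits: (B, T, N, V); labels: (B, T, N) int; mask: (B, T, N) bool
    ce = optax.softmax_cross_entropy_with_integer_labels(logits, labels)
    loss = (mask * ce).sum() / mask.sum()
    acc = (mask * (logits.argmax(-1) == labels)).sum() / mask.sum()
    return loss, acc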
16 |
+
15,39797,"train_dynamics.py",5890,0,"",python,selection_command
|
17 |
+
16,40207,"train_dynamics.py",5044,0,"",python,selection_command
|
18 |
+
17,40597,"train_dynamics.py",4217,0,"",python,selection_command
|
19 |
+
18,40765,"train_dynamics.py",3598,0,"",python,selection_command
|
20 |
+
19,40946,"train_dynamics.py",2859,0,"",python,selection_command
|
21 |
+
20,41080,"train_dynamics.py",2006,0,"",python,selection_command
|
22 |
+
21,41229,"train_dynamics.py",1466,0,"",python,selection_command
|
23 |
+
22,41446,"train_dynamics.py",882,0,"",python,selection_command
|
24 |
+
23,41604,"train_dynamics.py",388,0,"",python,selection_command
|
25 |
+
24,41744,"train_dynamics.py",0,0,"",python,selection_command
|
26 |
+
25,43268,"train_dynamics.py",617,0,"",python,selection_command
|
27 |
+
26,46121,"train_dynamics.py",0,0,"",python,selection_command
|
28 |
+
27,46786,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
29 |
+
28,49091,"train_dynamics.py",0,0,"",python,tab
|
30 |
+
29,49113,"TERMINAL",0,0,"",,terminal_focus
|
31 |
+
30,104571,"requirements.txt",0,0,"dm_pix>=0.4.3\neinops>=0.8.0\nflax>=0.8.5\njax[cuda12]>=0.4.30\noptax>=0.2.3\nprocgen>=0.10.7\ntyro>=0.8.5\nwandb>=0.17.4\ntensorflow>=2.1\npre-commit>=4.2.0",pip-requirements,tab
|
32 |
+
31,106305,"requirements.txt",73,0,"",pip-requirements,selection_command
|
33 |
+
32,106838,"requirements.txt",73,0,"#",pip-requirements,content
|
34 |
+
33,106844,"requirements.txt",74,0,"",pip-requirements,selection_keyboard
|
35 |
+
34,106906,"requirements.txt",74,0," ",pip-requirements,content
|
36 |
+
35,106910,"requirements.txt",75,0,"",pip-requirements,selection_keyboard
|
37 |
+
36,107095,"requirements.txt",74,0,"",pip-requirements,selection_command
|
38 |
+
37,114295,"requirements.txt",61,0,"",pip-requirements,selection_command
|
39 |
+
38,114428,"requirements.txt",41,0,"",pip-requirements,selection_command
|
40 |
+
39,114530,"requirements.txt",43,0,"",pip-requirements,selection_command
|
41 |
+
40,114983,"requirements.txt",43,1,"[",pip-requirements,selection_command
|
42 |
+
41,115082,"requirements.txt",43,7,"[cuda12",pip-requirements,selection_command
|
43 |
+
42,115265,"requirements.txt",43,10,"[cuda12]>=",pip-requirements,selection_command
|
44 |
+
43,115682,"requirements.txt",43,9,"[cuda12]>",pip-requirements,selection_command
|
45 |
+
44,115832,"requirements.txt",43,8,"[cuda12]",pip-requirements,selection_command
|
46 |
+
45,116046,"requirements.txt",43,8,"",pip-requirements,content
|
47 |
+
46,134920,"train_dynamics.py",0,0,"",python,tab
|
48 |
+
47,141347,"train_dynamics.py",41,0,"",python,selection_command
|
49 |
+
48,141596,"train_dynamics.py",51,0,"",python,selection_command
|
50 |
+
49,141626,"train_dynamics.py",63,0,"",python,selection_command
|
51 |
+
50,141657,"train_dynamics.py",64,0,"",python,selection_command
|
52 |
+
51,141691,"train_dynamics.py",78,0,"",python,selection_command
|
53 |
+
52,141894,"train_dynamics.py",116,0,"",python,selection_command
|
54 |
+
53,141964,"train_dynamics.py",121,0,"",python,selection_command
|
55 |
+
54,142163,"train_dynamics.py",125,0,"",python,selection_command
|
56 |
+
55,142229,"train_dynamics.py",87,0,"",python,selection_command
|
57 |
+
56,145246,"train_dynamics.py",653,0,"",python,selection_command
|
58 |
+
57,145597,"train_dynamics.py",1241,0,"",python,selection_command
|
59 |
+
58,148942,"train_dynamics.py",1839,0,"",python,selection_command
|
60 |
+
59,150414,"train_dynamics.py",2485,0,"",python,selection_command
|
61 |
+
60,150985,"train_dynamics.py",3341,0,"",python,selection_command
|
62 |
+
61,153046,"train_dynamics.py",2485,0,"",python,selection_command
|
63 |
+
62,153663,"train_dynamics.py",2527,0,"",python,selection_command
|
64 |
+
63,153914,"train_dynamics.py",2586,0,"",python,selection_command
|
65 |
+
64,153946,"train_dynamics.py",2606,0,"",python,selection_command
|
66 |
+
65,153981,"train_dynamics.py",2642,0,"",python,selection_command
|
67 |
+
66,154015,"train_dynamics.py",2677,0,"",python,selection_command
|
68 |
+
67,154046,"train_dynamics.py",2738,0,"",python,selection_command
|
69 |
+
68,154081,"train_dynamics.py",2784,0,"",python,selection_command
|
70 |
+
69,154113,"train_dynamics.py",2853,0,"",python,selection_command
|
71 |
+
70,154146,"train_dynamics.py",2859,0,"",python,selection_command
|
72 |
+
71,154180,"train_dynamics.py",2903,0,"",python,selection_command
|
73 |
+
72,154213,"train_dynamics.py",2904,0,"",python,selection_command
|
74 |
+
73,154246,"train_dynamics.py",2909,0,"",python,selection_command
|
75 |
+
74,154280,"train_dynamics.py",2918,0,"",python,selection_command
|
76 |
+
75,154508,"train_dynamics.py",3753,0,"",python,selection_command
|
77 |
+
76,155160,"train_dynamics.py",4363,0,"",python,selection_command
|
78 |
+
77,155948,"train_dynamics.py",5218,0,"",python,selection_command
|
79 |
+
78,158479,"train_dynamics.py",6074,0,"",python,selection_command
|
80 |
+
79,161795,"train_dynamics.py",6732,0,"",python,selection_command
|
81 |
+
80,166966,"train_dynamics.py",7582,0,"",python,selection_command
|
82 |
+
81,170485,"train_dynamics.py",8668,0,"",python,selection_command
|
83 |
+
82,175934,"train_dynamics.py",7582,0,"",python,selection_command
|
84 |
+
83,177106,"train_dynamics.py",7569,0,"",python,selection_command
|
85 |
+
84,177361,"train_dynamics.py",7559,0,"",python,selection_command
|
86 |
+
85,177390,"train_dynamics.py",7482,0,"",python,selection_command
|
87 |
+
86,177422,"train_dynamics.py",7421,0,"",python,selection_command
|
88 |
+
87,177457,"train_dynamics.py",7341,0,"",python,selection_command
|
89 |
+
88,177488,"train_dynamics.py",7304,0,"",python,selection_command
|
90 |
+
89,178494,"train_dynamics.py",7341,0,"",python,selection_command
|
91 |
+
90,178693,"train_dynamics.py",7421,0,"",python,selection_command
|
92 |
+
91,178849,"train_dynamics.py",7482,0,"",python,selection_command
|
93 |
+
92,179027,"train_dynamics.py",7559,0,"",python,selection_command
|
94 |
+
93,179210,"train_dynamics.py",7569,0,"",python,selection_command
|
95 |
+
94,179506,"train_dynamics.py",7559,0,"",python,selection_command
|
96 |
+
95,179674,"train_dynamics.py",7482,0,"",python,selection_command
|
97 |
+
96,179833,"train_dynamics.py",7421,0,"",python,selection_command
|
98 |
+
97,179926,"train_dynamics.py",7482,0,"",python,selection_command
|
99 |
+
98,180126,"train_dynamics.py",7421,0,"",python,selection_command
|
100 |
+
99,180497,"train_dynamics.py",7482,0,"",python,selection_command
|
101 |
+
100,181016,"train_dynamics.py",7559,0,"",python,selection_command
|
102 |
+
101,181259,"train_dynamics.py",7482,0,"",python,selection_command
|
103 |
+
102,183280,"train_dynamics.py",7421,0,"",python,selection_command
|
104 |
+
103,183526,"train_dynamics.py",7341,0,"",python,selection_command
|
105 |
+
104,185378,"train_dynamics.py",7407,0,"",python,selection_command
|
106 |
+
105,185754,"train_dynamics.py",7401,0,"",python,selection_command
|
107 |
+
106,185894,"train_dynamics.py",7399,0,"",python,selection_command
|
108 |
+
107,186075,"train_dynamics.py",7388,0,"",python,selection_command
|
109 |
+
108,186198,"train_dynamics.py",7387,0,"",python,selection_command
|
110 |
+
109,186477,"train_dynamics.py",7377,0,"",python,selection_command
|
111 |
+
110,212701,"train_dynamics.py",2949,0,"",python,selection_command
|
112 |
+
111,212882,"train_dynamics.py",2992,0,"",python,selection_command
|
113 |
+
112,212978,"train_dynamics.py",3000,0,"",python,selection_command
|
114 |
+
113,213228,"train_dynamics.py",3002,0,"",python,selection_command
|
115 |
+
114,213261,"train_dynamics.py",3005,0,"",python,selection_command
|
116 |
+
115,213294,"train_dynamics.py",3006,0,"",python,selection_command
|
117 |
+
116,213327,"train_dynamics.py",3020,0,"",python,selection_command
|
118 |
+
117,213590,"train_dynamics.py",3021,0,"",python,selection_command
|
119 |
+
118,214182,"train_dynamics.py",2988,0,"",python,selection_command
|
120 |
+
119,215003,"train_dynamics.py",2992,0,"",python,selection_command
|
121 |
+
120,215250,"train_dynamics.py",3000,0,"",python,selection_command
|
122 |
+
121,215283,"train_dynamics.py",3002,0,"",python,selection_command
|
123 |
+
122,215321,"train_dynamics.py",3005,0,"",python,selection_command
|
124 |
+
123,215353,"train_dynamics.py",3006,0,"",python,selection_command
|
125 |
+
124,215386,"train_dynamics.py",3020,0,"",python,selection_command
|
126 |
+
125,215420,"train_dynamics.py",3021,0,"",python,selection_command
|
127 |
+
126,221808,"train_dynamics.py",2988,0,"",python,selection_command
|
128 |
+
127,221880,"train_dynamics.py",2992,0,"",python,selection_command
|
129 |
+
128,222128,"train_dynamics.py",3000,0,"",python,selection_command
|
130 |
+
129,222161,"train_dynamics.py",3002,0,"",python,selection_command
|
131 |
+
130,222190,"train_dynamics.py",3005,0,"",python,selection_command
|
132 |
+
131,222224,"train_dynamics.py",3006,0,"",python,selection_command
|
133 |
+
132,222259,"train_dynamics.py",3020,0,"",python,selection_command
|
134 |
+
133,222293,"train_dynamics.py",3021,0,"",python,selection_command
|
135 |
+
134,223174,"train_dynamics.py",2988,0,"",python,selection_command
|
136 |
+
135,223299,"train_dynamics.py",2992,0,"",python,selection_command
|
137 |
+
136,223551,"train_dynamics.py",3000,0,"",python,selection_command
|
138 |
+
137,223581,"train_dynamics.py",3002,0,"",python,selection_command
|
139 |
+
138,223614,"train_dynamics.py",3005,0,"",python,selection_command
|
140 |
+
139,223648,"train_dynamics.py",3006,0,"",python,selection_command
|
141 |
+
140,223681,"train_dynamics.py",3020,0,"",python,selection_command
|
142 |
+
141,223715,"train_dynamics.py",3021,0,"",python,selection_command
|
143 |
+
142,223749,"train_dynamics.py",3037,0,"",python,selection_command
|
144 |
+
143,224380,"train_dynamics.py",3021,0,"",python,selection_command
|
145 |
+
144,225595,"train_dynamics.py",2988,0,"",python,selection_command
|
146 |
+
145,225681,"train_dynamics.py",2992,0,"",python,selection_command
|
147 |
+
146,225938,"train_dynamics.py",3000,0,"",python,selection_command
|
148 |
+
147,225966,"train_dynamics.py",3002,0,"",python,selection_command
|
149 |
+
148,225999,"train_dynamics.py",3005,0,"",python,selection_command
|
150 |
+
149,226037,"train_dynamics.py",3006,0,"",python,selection_command
|
151 |
+
150,226070,"train_dynamics.py",3020,0,"",python,selection_command
|
152 |
+
151,226103,"train_dynamics.py",3021,0,"",python,selection_command
|
153 |
+
152,251190,"train_dynamics.py",2006,0,"",python,selection_command
|
154 |
+
153,251439,"train_dynamics.py",2045,0,"",python,selection_command
|
155 |
+
154,251470,"train_dynamics.py",2075,0,"",python,selection_command
|
156 |
+
155,251506,"train_dynamics.py",2091,0,"",python,selection_command
|
157 |
+
156,251539,"train_dynamics.py",2107,0,"",python,selection_command
|
158 |
+
157,251574,"train_dynamics.py",2130,0,"",python,selection_command
|
159 |
+
158,251760,"train_dynamics.py",2204,0,"",python,selection_command
|
160 |
+
159,252115,"train_dynamics.py",2210,0,"",python,selection_command
|
161 |
+
160,256408,"train_dynamics.py",2215,0,"",python,selection_command
|
162 |
+
161,256583,"train_dynamics.py",2217,0,"",python,selection_command
|
163 |
+
162,262598,"models/dynamics.py",0,0,"",python,tab
|
164 |
+
163,263846,"models/dynamics.py",416,0,"",python,selection_keyboard
|
165 |
+
164,271113,"models/dynamics.py",1359,0,"",python,selection_command
|
166 |
+
165,273515,"models/dynamics.py",1373,0,"",python,selection_command
|
167 |
+
166,273673,"models/dynamics.py",1389,0,"",python,selection_command
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-f0382786-979c-4a6d-8e9b-f5977f18eb4f1753726151187-2025_07_28-20.09.13.67/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-f818bac9-3228-48bb-85cd-ad930fdb35d91752220838711-2025_07_11-10.00.40.248/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-faba6583-b2c9-4b94-9ba6-9f240428520a1750722089894-2025_06_23-22.50.32.930/source.csv
ADDED
@@ -0,0 +1,182 @@
|
1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
2 |
+
1,2,"src/recording.ts",0,0,"import * as fs from 'node:fs'\nimport * as path from 'node:path'\nimport * as vscode from 'vscode'\nimport * as readline from 'node:readline'\nimport axios from 'axios'\nimport { hasConsent, showConsentChangeDialog } from './consent'\nimport {\n getEditorFileName,\n escapeString,\n getEditorLanguage,\n notificationWithProgress,\n generateBaseFilePath,\n formatDisplayTime,\n getExportPath,\n logToOutput,\n formatSrtTime,\n getConfig,\n removeDoubleQuotes,\n unescapeString,\n addToGitignore,\n} from './utilities'\nimport { type File, ChangeType, type CSVRowBuilder, type Change, type Recording, type ConsentStatus } from './types'\nimport { extContext, statusBarItem, actionsProvider } from './extension'\n\nexport const commands = {\n openSettings: 'crowd-code.openSettings',\n startRecording: 'crowd-code.startRecording',\n stopRecording: 'crowd-code.stopRecording',\n panicButton: 'crowd-code.panicButton',\n}\n\nexport const recording: Recording = {\n isRecording: false,\n timer: 0,\n startDateTime: null,\n endDateTime: null,\n sequence: 0,\n customFolderName: '',\n activatedFiles: new Set<string>(),\n}\n\nlet intervalId: NodeJS.Timeout\nconst fileQueue: File[] = []\nlet isAppending = false\n\nlet uploadIntervalId: NodeJS.Timeout;\nconst sessionUuid = vscode.env.sessionId;\n\nlet panicStatusBarItem: vscode.StatusBarItem | undefined;\nlet panicButtonPressCount = 0;\nlet panicButtonTimeoutId: NodeJS.Timeout | undefined;\nlet accumulatedRemovedContent: Array<{content: string, sequence: number}> = []; // Store content with sequence numbers\n\nconst CROWD_CODE_API_GATEWAY_URL = process.env.CROWD_CODE_API_GATEWAY_URL;\n\nconst PANIC_BUTTON_TIMEOUT = 3000; // 3 seconds timeout for successive presses\n\n/**\n * Builds a CSV row with the given parameters.\n *\n * @param {CSVRowBuilder} sequence - The sequence number of the change.\n * @param {CSVRowBuilder} rangeOffset - The offset of the changed range.\n * @param {CSVRowBuilder} rangeLength - The length of the changed range.\n * @param {CSVRowBuilder} text - The text of the change.\n * @param {string} type - The type of the change (optional, defaults to 'content').\n * @return {string} A CSV row string with the provided information.\n */\nexport function buildCsvRow({\n sequence,\n rangeOffset,\n rangeLength,\n text,\n type = ChangeType.CONTENT,\n}: CSVRowBuilder): string | undefined {\n if (!recording.startDateTime) {\n return\n }\n\n const time = new Date().getTime() - recording.startDateTime.getTime()\n\n if (type === ChangeType.HEADING) {\n return 'Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type\n'\n }\n\n if (type === ChangeType.TERMINAL_FOCUS || type === ChangeType.TERMINAL_COMMAND || type === ChangeType.TERMINAL_OUTPUT) {\n return `${sequence},${time},""TERMINAL"",${rangeOffset},${rangeLength},""${escapeString(text)}"",,${type}\n`\n }\n\n const editorFileName = getEditorFileName()\n return `${sequence},${time},""${editorFileName}"",${rangeOffset},${rangeLength},""${escapeString(text)}"",${getEditorLanguage()},${type}\n`\n}\n\n/**\n * Checks if the current file being edited is within the configured export path.\n * This is used to determine if the current file should be recorded or not.\n *\n * @returns {boolean} `true` if the current file is within the export path, `false` otherwise.\n */\nexport function isCurrentFileExported(): boolean {\n const editor = vscode.window.activeTextEditor\n const filename = editor?.document.fileName.replaceAll('\\', '/')\n const exportPath = getExportPath()\n if (!editor || 
!filename || !exportPath) {\n return false\n }\n return filename.startsWith(exportPath)\n}\n\nconst onChangeSubscription = vscode.workspace.onDidChangeTextDocument(event => {\n if (!recording.isRecording) {\n return\n }\n\n if (isCurrentFileExported()) {\n return\n }\n const editor = vscode.window.activeTextEditor\n if (editor && event.document === editor.document) {\n for (const change of event.contentChanges) {\n recording.sequence++\n addToFileQueue(\n buildCsvRow({\n sequence: recording.sequence,\n rangeOffset: change.rangeOffset,\n rangeLength: change.rangeLength,\n text: change.text,\n })\n )\n appendToFile()\n }\n }\n})\n\n/**\n * Creates the recording folder if it doesn't exist.\n * @param folderPath - The path to the recording folder.\n */\nfunction createRecordingFolder(folderPath: string): void {\n if (!fs.existsSync(folderPath)) {\n fs.mkdirSync(folderPath, { recursive: true })\n }\n}\n\n/**\n * Starts the recording process and initializes necessary variables.\n */\nexport async function startRecording(): Promise<void> {\n if (recording.isRecording) {\n notificationWithProgress('Already recording')\n logToOutput('Already recording', 'info')\n return\n }\n const exportPath = getExportPath()\n if (!exportPath) {\n return\n }\n\n // If the setting is enabled and the path is inside the workspace, add it to .gitignore\n if (\n getConfig().get<boolean>('export.addToGitignore') &&\n getConfig().get<string>('export.exportPath')?.startsWith('${workspaceFolder}')\n ) {\n await addToGitignore()\n }\n\n recording.startDateTime = new Date()\n recording.activatedFiles = new Set<string>()\n\n // Ask for folder name if enabled in settings\n let customFolderName: string | undefined\n if (getConfig().get('recording.askFolderName')) {\n customFolderName = await vscode.window.showInputBox({\n prompt: 'Enter a name for the recording folder',\n placeHolder: 'Enter recording folder name',\n })\n if (!customFolderName) {\n stopRecording(true)\n return\n }\n recording.customFolderName = customFolderName\n }\n\n const baseFilePath = generateBaseFilePath(recording.startDateTime, false, recording.customFolderName, sessionUuid)\n if (!baseFilePath) {\n stopRecording(true)\n return\n }\n\n // Create the recording folder\n const folderPath = path.dirname(path.join(exportPath, baseFilePath))\n createRecordingFolder(folderPath)\n\n recording.isRecording = true\n recording.timer = 0\n recording.endDateTime = null\n recording.sequence = 0\n panicButtonPressCount = 0 // Reset panic button counter for new recording\n accumulatedRemovedContent = [] // Clear accumulated content for new recording\n if (panicButtonTimeoutId) {\n clearTimeout(panicButtonTimeoutId)\n panicButtonTimeoutId = undefined\n }\n intervalId = setInterval(() => {\n recording.timer++\n updateStatusBarItem()\n }, 1000)\n notificationWithProgress('Recording started')\n logToOutput('Recording started', 'info')\n\n // Only log initial editor content if there's an active text editor\n const editorText = vscode.window.activeTextEditor?.document.getText()\n const activeEditorUri = vscode.window.activeTextEditor?.document.uri.toString()\n\n if (editorText !== undefined && activeEditorUri) {\n recording.sequence++\n const csvRow = {\n sequence: recording.sequence,\n rangeOffset: 0,\n rangeLength: 0,\n text: editorText,\n type: ChangeType.TAB,\n }\n addToFileQueue(buildCsvRow({ ...csvRow, type: ChangeType.HEADING }))\n addToFileQueue(buildCsvRow(csvRow))\n appendToFile()\n recording.activatedFiles.add(activeEditorUri)\n 
actionsProvider.setCurrentFile(vscode.window.activeTextEditor?.document.fileName || '')\n } else {\n // If no active editor, just add the header row\n recording.sequence++\n addToFileQueue(buildCsvRow({ \n sequence: recording.sequence,\n rangeOffset: 0,\n rangeLength: 0,\n text: '',\n type: ChangeType.HEADING \n }))\n appendToFile()\n }\n\n extContext.subscriptions.push(onChangeSubscription)\n updateStatusBarItem()\n updatePanicButton()\n actionsProvider.setRecordingState(true)\n\n // Set up a timer to send data to the Lambda endpoint periodically\n uploadIntervalId = setInterval(async () => {\n if (!exportPath) {\n return;\n }\n \n if (typeof CROWD_CODE_API_GATEWAY_URL !== 'string' || !CROWD_CODE_API_GATEWAY_URL.trim()) {\n logToOutput(""CROWD_CODE_API_GATEWAY_URL must be a non-empty string. Please check your build configuration."", 'error');\n return;\n }\n\n // Only upload data if user has given consent\n if (!hasConsent()) {\n return;\n }\n\n const filePath = path.join(exportPath, `${baseFilePath}.csv`);\n const extensionVersion = extContext.extension.packageJSON.version as string;\n const userId = extContext.globalState.get<string>('userId');\n\n try {\n const fileContent = await fs.promises.readFile(filePath, 'utf-8');\n\n if (fileContent) {\n const payload = {\n fileName: `${baseFilePath}.csv`,\n content: fileContent,\n version: extensionVersion,\n userId: userId\n };\n await axios.post(CROWD_CODE_API_GATEWAY_URL, payload);\n console.log(`Successfully sent ${payload.fileName} to Lambda endpoint.`);\n logToOutput(`Successfully sent to Lambda endpoint.`, 'info');\n }\n } catch (error: any) {\n if (error.code === 'ENOENT') {\n console.warn(`File not found at ${filePath}. It might be created on first write.`);\n } else {\n console.error(`Error sending data to Lambda: ${error.message}`);\n if (axios.isAxiosError(error) && error.response) {\n console.error(""Lambda response status:"", error.response.status);\n console.error(""Lambda response data:"", error.response.data);\n }\n }\n }\n }, 1 * 60 * 1000); // 5 minutes\n}\n\n/**\n * Stops the recording process and finalizes the recording data.\n * @param context - The extension context.\n */\nexport function stopRecording(force = false): Promise<void> | void {\n if (!recording.isRecording) {\n notificationWithProgress('Not recording')\n return\n }\n\n recording.isRecording = false\n clearInterval(intervalId)\n clearInterval(uploadIntervalId); // Clear the upload timer\n recording.timer = 0\n recording.activatedFiles?.clear()\n panicButtonPressCount = 0 // Reset panic button counter when recording stops\n accumulatedRemovedContent = [] // Clear accumulated content when recording stops\n if (panicButtonTimeoutId) {\n clearTimeout(panicButtonTimeoutId)\n panicButtonTimeoutId = undefined\n }\n const index = extContext.subscriptions.indexOf(onChangeSubscription)\n if (index !== -1) {\n extContext.subscriptions.splice(index, 1)\n }\n updateStatusBarItem()\n updatePanicButton()\n actionsProvider.setRecordingState(false)\n if (force) {\n notificationWithProgress('Recording cancelled')\n logToOutput('Recording cancelled', 'info')\n recording.customFolderName = undefined\n return\n }\n notificationWithProgress('Recording finished')\n logToOutput('Recording finished', 'info')\n recording.endDateTime = new Date()\n return processCsvFile().then(() => {\n // Reset customFolderName after processing is complete\n recording.customFolderName = undefined\n }).catch(err => {\n logToOutput(`Error processing CSV file during stop: ${String(err)}`, 'error')\n 
recording.customFolderName = undefined\n });\n}\n\n/**\n * Appends data from the file queue to the appropriate file in the workspace.\n */\nexport async function appendToFile(): Promise<void> {\n if (isAppending) {\n return\n }\n isAppending = true\n\n const exportPath = getExportPath()\n if (!exportPath) {\n logToOutput('Export path not available in appendToFile, stopping recording.', 'error')\n stopRecording(true)\n isAppending = false\n return\n }\n\n while (fileQueue.length > 0) {\n const itemToAppend = fileQueue.shift()\n if (!itemToAppend) {\n continue\n }\n\n const filePath = path.join(exportPath, itemToAppend.name)\n\n try {\n const directory = path.dirname(filePath)\n if (!fs.existsSync(directory)) {\n fs.mkdirSync(directory, { recursive: true })\n }\n await fs.promises.appendFile(filePath, itemToAppend.content)\n } catch (err) {\n logToOutput(\n `Failed to append to file ${filePath}: ${err}. Item dropped. Content: ${itemToAppend.content.substring(0, 100)}...`,\n 'error'\n )\n }\n }\n isAppending = false\n}\n\n/**\n * Appends an SRT line to the file queue for the previous change.\n *\n * This function is responsible for generating the SRT format line for the previous change and adding it to the file queue.\n * It checks if the SRT export format is enabled, and if so, it generates the SRT line for the previous change and adds it to the file queue.\n *\n * @param processedChanges - An array of processed changes.\n * @param i - The index of the current change in the processedChanges array.\n * @param exportInSrt - A boolean indicating whether the SRT export format is enabled.\n */\nfunction addToSRTFile(processedChanges: Change[], i: number, exportInSrt: boolean) {\n if (!exportInSrt) {\n return\n }\n if (i === 0) {\n return\n }\n addToFileQueue(\n addSrtLine(\n processedChanges[i - 1].sequence,\n processedChanges[i - 1].startTime,\n processedChanges[i - 1].endTime,\n JSON.stringify({\n text: processedChanges[i - 1].text,\n file: processedChanges[i - 1].file,\n language: processedChanges[i - 1].language,\n })\n ),\n 'srt',\n true\n )\n}\n\n/**\n * Returns the new text content based on the change type and the previous change.\n * @param type - The type of the change.\n * @param text - The text of the change.\n * @param previousChange - The previous change.\n * @param rangeOffset - The offset of the range.\n * @param rangeLength - The length of the range.\n */\nfunction getNewTextContent(\n type: string,\n text: string,\n previousChange: Change | null,\n rangeOffset: number,\n rangeLength: number\n): string {\n if (type === ChangeType.TAB) {\n return text\n }\n if (!previousChange) {\n return ''\n }\n return getUpdatedText(previousChange.text, rangeOffset, rangeLength, text)\n}\n\n/**\n * Processes a single CSV line and returns the processed change\n */\nasync function processCSVLine(line: string, previousChange: Change | null): Promise<Change | null> {\n const lineArr = line.split(/,(?=(?:[^""]*""[^""]*"")*[^""]*$)/)\n\n if (Number.isNaN(Number.parseInt(lineArr[0]))) {\n return null\n }\n\n const time = Number.parseInt(lineArr[1])\n const file = removeDoubleQuotes(lineArr[2])\n const rangeOffset = Number.parseInt(lineArr[3])\n const rangeLength = Number.parseInt(lineArr[4])\n const text = unescapeString(removeDoubleQuotes(lineArr[5]))\n const language = lineArr[6]\n const type = lineArr[7]\n\n const newText = getNewTextContent(type, text, previousChange, rangeOffset, rangeLength)\n\n /**\n * Skip exporting changes with the same values to the previous change.\n */\n if (\n 
previousChange &&\n time === previousChange.startTime &&\n file === previousChange.file &&\n newText === previousChange.text &&\n language === previousChange.language\n ) {\n return null\n }\n\n return {\n sequence: previousChange ? previousChange.sequence + 1 : 1,\n file,\n startTime: time,\n endTime: 0,\n language,\n text: newText,\n }\n}\n\n/**\n * Returns the updated text content based on the previous text, range offset, range length, and new text.\n * @param previousText - The previous text.\n * @param rangeOffset - The offset of the range.\n * @param rangeLength - The length of the range.\n * @param newText - The new text.\n */\nfunction getUpdatedText(\n previousText: string,\n rangeOffset: number,\n rangeLength: number,\n newText: string\n): string {\n const textArray = previousText.split('')\n textArray.splice(rangeOffset, rangeLength, newText)\n return textArray.join('')\n}\n\n/**\n * Processes the CSV file and generates the necessary output files.\n */\nasync function processCsvFile(): Promise<void> {\n if (!validateRecordingState()) {\n return\n }\n\n const exportFormats = getConfig().get<string[]>('export.exportFormats', [])\n if (exportFormats.length === 0) {\n logToOutput('No export formats specified', 'info')\n vscode.window.showWarningMessage('No export formats specified')\n return\n }\n\n const exportPath = getExportPath()\n if (!exportPath) {\n return\n }\n\n if (!recording.startDateTime) {\n return\n }\n\n // Use the same custom folder name for reading the source file\n const baseFilePathSource = generateBaseFilePath(\n recording.startDateTime,\n false,\n recording.customFolderName,\n sessionUuid\n )\n if (!baseFilePathSource) {\n return\n }\n\n const filePath = path.join(exportPath, `${baseFilePathSource}.csv`)\n\n try {\n if (!fs.existsSync(filePath)) {\n throw new Error(`Source file not found: ${filePath}`)\n }\n\n const processedChanges: Change[] = []\n\n const rl = readline.createInterface({\n input: fs.createReadStream(filePath),\n crlfDelay: Number.POSITIVE_INFINITY,\n })\n\n for await (const line of rl) {\n const previousChange = processedChanges[processedChanges.length - 1]\n const change = await processCSVLine(line, previousChange)\n\n if (change) {\n if (previousChange) {\n previousChange.endTime = change.startTime\n if (exportFormats.includes('SRT')) {\n addToSRTFile(processedChanges, processedChanges.length, true)\n }\n }\n processedChanges.push(change)\n }\n }\n\n rl.close();\n\n return finalizeRecording(processedChanges, exportFormats);\n\n } catch (err) {\n vscode.window.showErrorMessage(`Error processing recording: ${err}`)\n logToOutput('Error processing CSV file: ' + String(err), 'error')\n return Promise.resolve(); // Resolve even on error after showing message\n }\n}\n\nfunction validateRecordingState(): boolean {\n if (!vscode.workspace.workspaceFolders) {\n logToOutput(\n 'No workspace folder found. 
To process the recording is needed a workspace folder',\n 'error'\n )\n return false\n }\n if (!recording.endDateTime || !recording.startDateTime) {\n logToOutput('Recording date time is not properly set', 'error')\n return false\n }\n return true\n}\n\nfunction finalizeRecording(processedChanges: Change[], exportFormats: string[]): Promise<void> {\n const lastChange = processedChanges[processedChanges.length - 1]\n if (lastChange && recording.endDateTime && recording.startDateTime) {\n lastChange.endTime = recording.endDateTime.getTime() - recording.startDateTime.getTime()\n if (exportFormats.includes('SRT')) {\n addToSRTFile(processedChanges, processedChanges.length, true)\n }\n }\n if (exportFormats.includes('JSON')) {\n addToFileQueue(JSON.stringify(processedChanges), 'json', true)\n }\n return appendToFile().then(() => {\n // Refresh the recordFiles view after export is complete\n vscode.commands.executeCommand('crowd-code.refreshRecordFiles')\n })\n}\n\n/**\n * Adds a line to the SRT file format.\n * @param sequence - The sequence number of the change.\n * @param start - The start time of the change.\n * @param end - The end time of the change.\n * @param text - The text of the change.\n * @returns A string representing a line in the SRT file format.\n */\nfunction addSrtLine(sequence: number, start: number, end: number, text: string): string {\n return `${sequence}\n${formatSrtTime(start)} --> ${formatSrtTime(end)}\n${text}\n\n`\n}\n\n/**\n * Adds content to the file queue.\n * @param content - The content to add.\n * @param fileExtension - The file extension (optional, defaults to 'csv').\n */\nexport function addToFileQueue(\n content: string | undefined,\n fileExtension = 'csv',\n isExport = false\n): void {\n if (!content) {\n return\n }\n if (!recording.startDateTime) {\n return\n }\n // Use the same custom name throughout the recording session\n const baseFilePath = generateBaseFilePath(recording.startDateTime, isExport, recording.customFolderName, sessionUuid)\n if (!baseFilePath) {\n return\n }\n fileQueue.push({\n name: `${baseFilePath}.${fileExtension}`,\n content: content,\n })\n}\n\n/**\n * Updates the status bar item with the current recording status and time.\n */\nexport function updateStatusBarItem(): void {\n if (recording.isRecording) {\n if (getConfig().get('appearance.showTimer') === false) {\n statusBarItem.text = '$(debug-stop)'\n statusBarItem.tooltip = 'Current time: ' + formatDisplayTime(recording.timer)\n }\n if (getConfig().get('appearance.showTimer') === true) {\n statusBarItem.text = '$(debug-stop) ' + formatDisplayTime(recording.timer)\n statusBarItem.tooltip = 'Stop Recording'\n }\n statusBarItem.command = commands.stopRecording\n statusBarItem.show()\n } else {\n const editor = vscode.window.activeTextEditor\n if (!editor) {\n statusBarItem.hide()\n return\n }\n if (getConfig().get('appearance.minimalMode') === true) {\n statusBarItem.text = '$(circle-large-filled)'\n } else {\n statusBarItem.text = '$(circle-large-filled) Start Recording'\n }\n statusBarItem.tooltip = 'Start Recording'\n statusBarItem.command = commands.startRecording\n statusBarItem.show()\n }\n}\n\n/**\n * Creates and updates the panic button status bar item.\n */\nexport function updatePanicButton(): void {\n if (!recording.isRecording) {\n if (panicStatusBarItem) {\n panicStatusBarItem.hide()\n }\n return\n }\n\n // Create panic button if it doesn't exist\n if (!panicStatusBarItem) {\n panicStatusBarItem = vscode.window.createStatusBarItem(vscode.StatusBarAlignment.Right, 8999) 
// Position it to the left of the recording button\n extContext.subscriptions.push(panicStatusBarItem)\n }\n\n const secondsToRemove = (panicButtonPressCount + 1) * 10 // Show what the next press will remove\n panicStatusBarItem.text = '$(refresh)'\n panicStatusBarItem.tooltip = `Remove last ${secondsToRemove} seconds of recording (click again within 3 seconds to remove more)`\n panicStatusBarItem.command = commands.panicButton\n panicStatusBarItem.show()\n}\n\n/**\n * Deletes the last N seconds of recording data from the CSV file.\n * This is a ""panic button"" feature that allows users to quickly remove recent sensitive data.\n * Each successive press within 3 seconds removes more time: 10s, 20s, 30s, etc.\n * After 3 seconds of inactivity, the next press will be treated as a fresh press (10s).\n */\nexport async function panicButton(): Promise<void> {\n if (!recording.isRecording) {\n vscode.window.showWarningMessage('No active recording to remove data from')\n logToOutput('No active recording to remove data from', 'info')\n return\n }\n\n if (!recording.startDateTime) {\n vscode.window.showErrorMessage('Recording start time not available')\n logToOutput('Recording start time not available', 'error')\n return\n }\n\n const exportPath = getExportPath()\n if (!exportPath) {\n vscode.window.showErrorMessage('Export path not available')\n logToOutput('Export path not available', 'error')\n return\n }\n\n const baseFilePath = generateBaseFilePath(recording.startDateTime, false, recording.customFolderName, sessionUuid)\n if (!baseFilePath) {\n vscode.window.showErrorMessage('Could not generate file path')\n logToOutput('Could not generate file path', 'error')\n return\n }\n\n const filePath = path.join(exportPath, `${baseFilePath}.csv`)\n\n try {\n // Check if file exists\n if (!fs.existsSync(filePath)) {\n vscode.window.showWarningMessage('No recording file found to remove data from')\n logToOutput('No recording file found to remove data from', 'info')\n return\n }\n\n // Read the file\n const content = fs.readFileSync(filePath, 'utf-8')\n const lines = content.split('\n')\n \n if (lines.length <= 1) {\n vscode.window.showWarningMessage('Recording file is empty, nothing to remove')\n logToOutput('Recording file is empty, nothing to remove', 'info')\n return\n }\n\n // Calculate how many lines to remove (10 seconds per press)\n const linesToRemove = Math.min((panicButtonPressCount + 1) * 10, lines.length - 1)\n const newLines = lines.slice(0, lines.length - linesToRemove)\n \n // Capture the lines that will be removed for display\n const removedLines = lines.slice(lines.length - linesToRemove)\n\n // Write back to file\n fs.writeFileSync(filePath, newLines.join('\n'))\n\n // Update panic button state\n panicButtonPressCount++\n \n // Set up timeout to reset the counter after 3 seconds of inactivity\n if (panicButtonTimeoutId) {\n clearTimeout(panicButtonTimeoutId)\n }\n panicButtonTimeoutId = setTimeout(() => {\n panicButtonPressCount = 0\n accumulatedRemovedContent = [] // Clear accumulated content\n updatePanicButton()\n }, PANIC_BUTTON_TIMEOUT)\n \n updatePanicButton()\n\n const secondsToRemove = panicButtonPressCount * 10\n const actualLinesRemoved = lines.length - newLines.length\n \n // Accumulate removed content and show immediate popup\n if (removedLines.length > 0) {\n const nonEmptyLines = removedLines.filter(line => line.trim())\n if (nonEmptyLines.length > 0) {\n // Create a simple, readable summary of removed content\n const contentSummary = nonEmptyLines.map(line => {\n // 
Extract just the text content from CSV for cleaner display\n const parts = line.split(',')\n if (parts.length >= 6) {\n const textContent = parts[5].replace(/^""|""$/g, '') // Remove quotes\n // Clean up common escape sequences\n const cleanText = textContent\n .replace(/\\n/g, '\n')\n .replace(/\\t/g, '\t')\n .replace(/\\r/g, '\r')\n return { content: cleanText, sequence: Number.parseInt(parts[0]) }\n }\n return { content: line, sequence: Number.parseInt(line.split(',')[0]) }\n }).filter(item => item.content.trim().length > 0)\n \n // Add to accumulated content\n accumulatedRemovedContent.push(...contentSummary)\n \n // Sort by sequence number to show in original file order\n const sortedContent = accumulatedRemovedContent.sort((a, b) => a.sequence - b.sequence)\n \n // Show immediate popup with accumulated content\n const totalContent = sortedContent.map(item => item.content).join(' ')\n const summaryText = totalContent.length > 100 \n ? totalContent.substring(0, 100) + '...' \n : totalContent\n \n vscode.window.showInformationMessage(\n `Removed content: ""${summaryText}""`,\n 'Dismiss'\n )\n }\n }\n\n } catch (error) {\n const errorMessage = `Error during panic button operation: ${error}`\n vscode.window.showErrorMessage(errorMessage)\n logToOutput(errorMessage, 'error')\n }\n}",typescript,tab
|
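The source.csv rows in this dataset are written by buildCsvRow and re-read with the quote-aware split in processCSVLine above. A hedged Python sketch of the same parsing, for consuming these recordings outside VS Code (field order follows the header row; the helper name is hypothetical):

import re

# Split on commas that sit outside double-quoted fields, as processCSVLine does.
SPLIT = re.compile(r',(?=(?:[^"]*"[^"]*")*[^"]*$)')

def parse_row(line: str) -> dict:
    seq, time_ms, file, offset, length, text, language, type_ = SPLIT.split(line, maxsplit=7)
    return {
        "sequence": int(seq),
        "time_ms": int(time_ms),
        "file": file.strip('"'),
        "range_offset": int(offset),
        "range_length": int(length),
        "text": text.strip('"').replace('""', '"'),  # undo CSV quote doubling
        "language": language,
        "type": type_,
    }

row = parse_row('4,905,"utils/dataloader.py",0,0,"",python,tab')
assert row["type"] == "tab" and row["file"] == "utils/dataloader.py"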
3 |
+
2,78,"extension-output-pdoom-org.crowd-code-#5-crowd-code",0,0,"10:50:32 PM [info] Activating crowd-code\n10:50:32 PM [info] Recording started\n10:50:32 PM [info] Initializing git provider using file system watchers...\n10:50:32 PM [info] Git repository found\n10:50:32 PM [info] Git provider initialized successfully\n10:50:32 PM [info] Initial git state: [object Object]\n",Log,tab
|
4 |
+
3,2192,"src/recording.ts",0,0,"",typescript,tab
|
5 |
+
4,2193,"src/recording.ts",10135,0,"",typescript,selection_mouse
|
6 |
+
5,256675,"src/recording.ts",9085,0,"",typescript,selection_command
|
7 |
+
6,310049,"src/recording.ts",9119,0,"",typescript,selection_command
|
8 |
+
7,310303,"src/recording.ts",9172,0,"",typescript,selection_command
|
9 |
+
8,310329,"src/recording.ts",9214,0,"",typescript,selection_command
|
10 |
+
9,310363,"src/recording.ts",9261,0,"",typescript,selection_command
|
11 |
+
10,310399,"src/recording.ts",9291,0,"",typescript,selection_command
|
12 |
+
11,310431,"src/recording.ts",9315,0,"",typescript,selection_command
|
13 |
+
12,310681,"src/recording.ts",9386,0,"",typescript,selection_command
|
14 |
+
13,311616,"src/recording.ts",9476,0,"",typescript,selection_command
|
15 |
+
14,313315,"src/recording.ts",9454,78,"",typescript,content
|
16 |
+
15,313342,"src/recording.ts",9466,0,"",typescript,selection_command
|
17 |
+
16,314505,"src/recording.ts",9466,0," logToOutput(`Successfully sent to Lambda endpoint.`, 'info');\n ",typescript,content
|
18 |
+
17,314508,"src/recording.ts",9476,0,"",typescript,selection_command
|
19 |
+
18,316423,"src/recording.ts",9454,78,"",typescript,content
|
20 |
+
19,316439,"src/recording.ts",9466,0,"",typescript,selection_command
|
21 |
+
20,317197,"src/recording.ts",9480,0,"",typescript,selection_command
|
22 |
+
21,317443,"src/recording.ts",9511,0,"",typescript,selection_command
|
23 |
+
22,317478,"src/recording.ts",9554,0,"",typescript,selection_command
|
24 |
+
23,317508,"src/recording.ts",9654,0,"",typescript,selection_command
|
25 |
+
24,317544,"src/recording.ts",9675,0,"",typescript,selection_command
|
26 |
+
25,317578,"src/recording.ts",9756,0,"",typescript,selection_command
|
27 |
+
26,317611,"src/recording.ts",9823,0,"",typescript,selection_command
|
28 |
+
27,317645,"src/recording.ts",9908,0,"",typescript,selection_command
|
29 |
+
28,317679,"src/recording.ts",9989,0,"",typescript,selection_command
|
30 |
+
29,317794,"src/recording.ts",10007,0,"",typescript,selection_command
|
31 |
+
30,318048,"src/recording.ts",10017,0,"",typescript,selection_command
|
32 |
+
31,318077,"src/recording.ts",10031,0,"",typescript,selection_command
|
33 |
+
32,318441,"src/recording.ts",10030,0,"",typescript,selection_command
|
34 |
+
33,318590,"src/recording.ts",10028,0,"",typescript,selection_command
|
35 |
+
34,318767,"src/recording.ts",10026,0,"",typescript,selection_command
|
36 |
+
35,319213,"src/recording.ts",10026,1,"5",typescript,content
|
37 |
+
36,320144,"src/recording.ts",10016,0,"",typescript,selection_command
|
38 |
+
37,320398,"src/recording.ts",10002,0,"",typescript,selection_command
|
39 |
+
38,320427,"src/recording.ts",9984,0,"",typescript,selection_command
|
40 |
+
39,320465,"src/recording.ts",9903,0,"",typescript,selection_command
|
41 |
+
40,320493,"src/recording.ts",9818,0,"",typescript,selection_command
|
42 |
+
41,320528,"src/recording.ts",9751,0,"",typescript,selection_command
|
43 |
+
42,320562,"src/recording.ts",9670,0,"",typescript,selection_command
|
44 |
+
43,320594,"src/recording.ts",9649,0,"",typescript,selection_command
|
45 |
+
44,320629,"src/recording.ts",9549,0,"",typescript,selection_command
|
46 |
+
45,320662,"src/recording.ts",9506,0,"",typescript,selection_command
|
47 |
+
46,320698,"src/recording.ts",9475,0,"",typescript,selection_command
|
48 |
+
47,321493,"src/recording.ts",9461,0,"",typescript,selection_command
|
49 |
+
48,321915,"src/recording.ts",9371,0,"",typescript,selection_command
|
50 |
+
49,322434,"src/recording.ts",9300,0,"",typescript,selection_command
|
51 |
+
50,329951,"src/recording.ts",9281,0,"",typescript,selection_command
|
52 |
+
51,330204,"src/recording.ts",9246,0,"",typescript,selection_command
|
53 |
+
52,330230,"src/recording.ts",9199,0,"",typescript,selection_command
|
54 |
+
53,330375,"src/recording.ts",9157,0,"",typescript,selection_command
|
55 |
+
54,330540,"src/recording.ts",9104,0,"",typescript,selection_command
|
56 |
+
55,330712,"src/recording.ts",9157,0,"",typescript,selection_command
|
57 |
+
56,330910,"src/recording.ts",9170,0,"",typescript,selection_command
|
58 |
+
57,330953,"src/recording.ts",9117,0,"",typescript,selection_command
|
59 |
+
58,331166,"src/recording.ts",9083,0,"",typescript,selection_command
|
60 |
+
59,331302,"src/recording.ts",9085,0,"",typescript,selection_command
|
61 |
+
60,331503,"src/recording.ts",9093,0,"",typescript,selection_command
|
62 |
+
61,331610,"src/recording.ts",9127,0,"",typescript,selection_command
|
63 |
+
62,331751,"src/recording.ts",9130,0,"",typescript,selection_command
|
64 |
+
63,350255,"src/recording.ts",9183,0,"",typescript,selection_command
|
65 |
+
64,350406,"src/recording.ts",9225,0,"",typescript,selection_command
|
66 |
+
65,350598,"src/recording.ts",9272,0,"",typescript,selection_command
|
67 |
+
66,352808,"src/recording.ts",9225,0,"",typescript,selection_command
|
68 |
+
67,354872,"src/recording.ts",9183,0,"",typescript,selection_command
|
69 |
+
68,359461,"src/recording.ts",9130,0,"",typescript,selection_command
|
70 |
+
69,359617,"src/recording.ts",9095,0,"",typescript,selection_command
|
71 |
+
70,359673,"src/recording.ts",9117,0,"",typescript,selection_command
|
72 |
+
71,359794,"src/recording.ts",9083,0,"",typescript,selection_command
|
73 |
+
72,359976,"src/recording.ts",9052,0,"",typescript,selection_command
|
74 |
+
73,360191,"src/recording.ts",9031,0,"",typescript,selection_command
|
75 |
+
74,360447,"src/recording.ts",8972,0,"",typescript,selection_command
|
76 |
+
75,360582,"src/recording.ts",8982,0,"",typescript,selection_command
|
77 |
+
76,360831,"src/recording.ts",8984,0,"",typescript,selection_command
|
78 |
+
77,360860,"src/recording.ts",8990,0,"",typescript,selection_command
|
79 |
+
78,360894,"src/recording.ts",8992,0,"",typescript,selection_command
|
80 |
+
79,360925,"src/recording.ts",8993,0,"",typescript,selection_command
|
81 |
+
80,360961,"src/recording.ts",9001,0,"",typescript,selection_command
|
82 |
+
81,360991,"src/recording.ts",9002,0,"",typescript,selection_command
|
83 |
+
82,361027,"src/recording.ts",9010,0,"",typescript,selection_command
|
84 |
+
83,361058,"src/recording.ts",9011,0,"",typescript,selection_command
|
85 |
+
84,361092,"src/recording.ts",9019,0,"",typescript,selection_command
|
86 |
+
85,361127,"src/recording.ts",9021,0,"",typescript,selection_command
|
87 |
+
86,361386,"src/recording.ts",9019,0,"",typescript,selection_command
|
88 |
+
87,361536,"src/recording.ts",9011,0,"",typescript,selection_command
|
89 |
+
88,371148,"src/recording.ts",9031,0,"",typescript,selection_command
|
90 |
+
89,371398,"src/recording.ts",9061,0,"",typescript,selection_command
|
91 |
+
90,371425,"src/recording.ts",9095,0,"",typescript,selection_command
|
92 |
+
91,371594,"src/recording.ts",9148,0,"",typescript,selection_command
|
93 |
+
92,372005,"src/recording.ts",9147,0,"",typescript,selection_command
|
94 |
+
93,372143,"src/recording.ts",9144,0,"",typescript,selection_command
|
95 |
+
94,372278,"src/recording.ts",9142,0,"",typescript,selection_command
|
96 |
+
95,372403,"src/recording.ts",9130,0,"",typescript,selection_command
|
97 |
+
96,373188,"src/recording.ts",8762,0,"",typescript,selection_command
|
98 |
+
97,373475,"src/recording.ts",6202,0,"",typescript,selection_command
|
99 |
+
98,401363,"src/recording.ts",6203,0,"",typescript,selection_mouse
|
100 |
+
99,471528,"src/recording.ts",6144,72," const folderPath = path.dirname(path.join(exportPath, baseFilePath))",typescript,selection_command
|
101 |
+
100,472180,"src/recording.ts",6203,0,"",typescript,selection_command
|
102 |
+
101,474540,"src/recording.ts",6202,0,"",typescript,selection_command
|
103 |
+
102,474720,"src/recording.ts",6200,0,"",typescript,selection_command
|
104 |
+
103,474892,"src/recording.ts",6190,0,"",typescript,selection_command
|
105 |
+
104,475017,"src/recording.ts",6189,0,"",typescript,selection_command
|
106 |
+
105,475195,"src/recording.ts",6185,0,"",typescript,selection_command
|
107 |
+
106,475350,"src/recording.ts",6184,0,"",typescript,selection_command
|
108 |
+
107,475507,"src/recording.ts",6180,0,"",typescript,selection_command
|
109 |
+
108,475678,"src/recording.ts",6179,0,"",typescript,selection_command
|
110 |
+
109,475981,"src/recording.ts",6172,0,"",typescript,selection_command
|
111 |
+
110,476576,"src/recording.ts",6215,0,"",typescript,selection_command
|
112 |
+
111,563556,"src/recording.ts",6148,0,"",typescript,selection_command
|
113 |
+
112,563913,"src/recording.ts",6154,0,"",typescript,selection_command
|
114 |
+
113,565484,"src/recording.ts",6165,0,"",typescript,selection_command
|
115 |
+
114,565741,"src/recording.ts",6167,0,"",typescript,selection_command
|
116 |
+
115,565766,"src/recording.ts",6171,0,"",typescript,selection_command295,2976491,"src/recording.ts",28645,0,"\n",typescript,content
|
117 |
+
296,2976613,"src/recording.ts",28646,0,"s",typescript,content
|
118 |
+
297,2976614,"src/recording.ts",28647,0,"",typescript,selection_keyboard
|
119 |
+
298,2976738,"src/recording.ts",28647,0,"u",typescript,content
|
120 |
+
299,2976741,"src/recording.ts",28648,0,"",typescript,selection_keyboard
|
121 |
+
300,2976798,"src/recording.ts",28648,0,"p",typescript,content
|
122 |
+
301,2976801,"src/recording.ts",28649,0,"",typescript,selection_keyboard
|
123 |
+
302,2976934,"src/recording.ts",28649,0,"e",typescript,content
|
124 |
+
303,2976957,"src/recording.ts",28650,0,"",typescript,selection_keyboard
|
125 |
+
304,2976992,"src/recording.ts",28650,0,"r",typescript,content
|
126 |
+
305,2976997,"src/recording.ts",28651,0,"",typescript,selection_keyboard
|
127 |
+
306,2977200,"src/recording.ts",28651,0,"s",typescript,content
|
128 |
+
307,2977202,"src/recording.ts",28652,0,"",typescript,selection_keyboard
|
129 |
+
308,2977525,"src/recording.ts",28652,0,"e",typescript,content
|
130 |
+
309,2977530,"src/recording.ts",28653,0,"",typescript,selection_keyboard
|
131 |
+
310,2977634,"src/recording.ts",28653,0,"c",typescript,content
|
132 |
+
311,2977635,"src/recording.ts",28654,0,"",typescript,selection_keyboard
|
133 |
+
312,2977825,"src/recording.ts",28654,0,"r",typescript,content
|
134 |
+
313,2977826,"src/recording.ts",28655,0,"",typescript,selection_keyboard
|
135 |
+
314,2977896,"src/recording.ts",28655,0,"e",typescript,content324,2997670,"src/recording.ts",28659,0,"",typescript,selection_command
|
136 |
+
325,2998126,"src/recording.ts",28646,14,"",typescript,content
|
137 |
+
326,3005814,"src/recording.ts",28646,0,"s",typescript,content
|
138 |
+
327,3005815,"src/recording.ts",28647,0,"",typescript,selection_keyboard
|
139 |
+
328,3005883,"src/recording.ts",28647,0,"e",typescript,content
|
140 |
+
329,3005888,"src/recording.ts",28648,0,"",typescript,selection_keyboard
|
141 |
+
330,3006331,"src/recording.ts",28648,0,"c",typescript,content
|
142 |
+
331,3006335,"src/recording.ts",28649,0,"",typescript,selection_keyboard
|
143 |
+
332,3006535,"src/recording.ts",28649,0,"r",typescript,content
|
144 |
+
333,3006538,"src/recording.ts",28650,0,"",typescript,selection_keyboard
|
145 |
+
334,3006577,"src/recording.ts",28650,0,"e",typescript,content
|
146 |
+
335,3006581,"src/recording.ts",28651,0,"",typescript,selection_keyboard
|
147 |
+
336,3006735,"src/recording.ts",28651,0,"t",typescript,content
|
148 |
+
337,3006738,"src/recording.ts",28652,0,"",typescript,selection_keyboard
|
149 |
+
338,3006976,"src/recording.ts",28652,0,"p",typescript,content
|
150 |
+
339,3006979,"src/recording.ts",28653,0,"",typescript,selection_keyboard
|
151 |
+
340,3007037,"src/recording.ts",28653,0,"a",typescript,content380,3137765,"src/recording.ts",28814,0,"",typescript,selection_command
|
152 |
+
381,3138176,"src/recording.ts",28647,168,"",typescript,content
|
153 |
+
382,3140498,"src/recording.ts",28647,0,"s",typescript,content
|
154 |
+
383,3140500,"src/recording.ts",28648,0,"",typescript,selection_keyboard
|
155 |
+
384,3140565,"src/recording.ts",28648,0,"e",typescript,content
|
156 |
+
385,3140566,"src/recording.ts",28649,0,"",typescript,selection_keyboard
|
157 |
+
386,3140680,"src/recording.ts",28649,0,"c",typescript,content
|
158 |
+
387,3140684,"src/recording.ts",28650,0,"",typescript,selection_keyboard
|
159 |
+
388,3140885,"src/recording.ts",28650,0,"r",typescript,content
|
160 |
+
389,3140889,"src/recording.ts",28651,0,"",typescript,selection_keyboard
|
161 |
+
390,3140958,"src/recording.ts",28651,0,"e",typescript,content
|
162 |
+
391,3140962,"src/recording.ts",28652,0,"",typescript,selection_keyboard
|
163 |
+
392,3141094,"src/recording.ts",28652,0,"t",typescript,content
|
164 |
+
393,3141097,"src/recording.ts",28653,0,"",typescript,selection_keyboard
|
165 |
+
394,3141268,"src/recording.ts",28653,0,"p",typescript,content
|
166 |
+
395,3141270,"src/recording.ts",28654,0,"",typescript,selection_keyboard
|
167 |
+
396,3141340,"src/recording.ts",28654,0,"a",typescript,content
|
168 |
+
397,3141341,"src/recording.ts",28655,0,"",typescript,selection_keyboard
|
169 |
+
398,3141375,"src/recording.ts",28655,0,"s",typescript,content
|
170 |
+
399,3141376,"src/recording.ts",28656,0,"",typescript,selection_keyboard
|
171 |
+
400,3141534,"src/recording.ts",28656,0,"s",typescript,content
|
172 |
+
401,3141534,"src/recording.ts",28657,0,"",typescript,selection_keyboard
|
173 |
+
402,3141721,"src/recording.ts",28657,0,"w",typescript,content
|
174 |
+
403,3141721,"src/recording.ts",28658,0,"",typescript,selection_keyboard
|
175 |
+
404,3141921,"src/recording.ts",28658,0,"o",typescript,content
|
176 |
+
405,3141921,"src/recording.ts",28659,0,"",typescript,selection_keyboard
|
177 |
+
406,3141976,"src/recording.ts",28659,0,"r",typescript,content
|
178 |
+
407,3141976,"src/recording.ts",28660,0,"",typescript,selection_keyboard
|
179 |
+
408,3142099,"src/recording.ts",28660,0,"d",typescript,content418,3167843,"src/recording.ts",28660,0,"",typescript,selection_command
|
180 |
+
419,3168521,"src/recording.ts",28646,15,"",typescript,content
|
181 |
+
420,3169730,"src/recording.ts",28645,1,"",typescript,content
|
182 |
+
421,3169747,"src/recording.ts",28644,0,"",typescript,selection_command
|
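Each row above follows the schema declared at the top of every source.csv file (Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type). As a minimal sketch of how such a row can be consumed, the TypeScript below parses one CSV line into a typed record. The `SourceEvent` interface and `parseEventRow` function are illustrative names, not part of the dataset; the field comments reflect the apparent meaning of the values in the rows above, and standard CSV quoting (fields wrapped in double quotes, embedded quotes doubled) is assumed.

// Illustrative record type for one crowd-code event row.
interface SourceEvent {
    sequence: number;    // monotonically increasing event id
    time: number;        // milliseconds since the recording started
    file: string;        // path of the document the event applies to
    rangeOffset: number; // character offset where the edit/selection applies
    rangeLength: number; // number of characters replaced (0 for pure inserts)
    text: string;        // inserted text, with newlines stored as the escape \n
    language: string;    // editor language id, e.g. "typescript"
    type: string;        // "content", "tab", "selection_command", "selection_keyboard", "selection_mouse"
}

function parseEventRow(row: string): SourceEvent {
    // Split on commas that are not inside a double-quoted field;
    // a doubled quote ("") inside a quoted field is an escaped quote.
    const fields: string[] = [];
    let cur = "";
    let inQuotes = false;
    for (let i = 0; i < row.length; i++) {
        const ch = row[i];
        if (inQuotes) {
            if (ch === '"' && row[i + 1] === '"') { cur += '"'; i++; }
            else if (ch === '"') { inQuotes = false; }
            else { cur += ch; }
        } else if (ch === '"') {
            inQuotes = true;
        } else if (ch === ",") {
            fields.push(cur); cur = "";
        } else {
            cur += ch;
        }
    }
    fields.push(cur);
    const [sequence, time, file, rangeOffset, rangeLength, text, language, type] = fields;
    return {
        sequence: Number(sequence),
        time: Number(time),
        file,
        rangeOffset: Number(rangeOffset),
        rangeLength: Number(rangeLength),
        text,
        language,
        type,
    };
}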
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-faba6583-b2c9-4b94-9ba6-9f240428520a1750722089894-2025_06_23-23.44.53.64/source.csv
ADDED
@@ -0,0 +1,14 @@
+Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
+8,5659,"src/recording.ts",28648,0,"",typescript,selection_command
+21,83215,"src/recording.ts",28651,0,"",typescript,selection_command
+49,146799,"src/recording.ts",28652,0,"",typescript,selection_command
+50,148423,"Untitled-1",0,0,"",plaintext,tab
+51,151646,"Untitled-1",0,0,"\n",plaintext,content
+52,151896,"Untitled-1",1,0,"\n",plaintext,content
+53,151920,"Untitled-1",2,0,"\n",plaintext,content
+54,151954,"Untitled-1",3,0,"\n",plaintext,content
+55,151987,"Untitled-1",4,0,"\n",plaintext,content
+56,152021,"Untitled-1",5,0,"\n",plaintext,content
+57,152054,"Untitled-1",6,0,"\n",plaintext,content
+58,152087,"Untitled-1",7,0,"\n",plaintext,content
+59,152121,"Untitled-1",8,0,"\n",plaintext,content
+60,152154,"Untitled-1",9,0,"\n",plaintext,content
+61,152188,"Untitled-1",10,0,"\n",plaintext,content
+62,152220,"Untitled-1",11,0,"\n",plaintext,content
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-faba6583-b2c9-4b94-9ba6-9f240428520a1750722089894-2025_06_23-23.49.28.299/source.csv
ADDED
@@ -0,0 +1,151 @@
+Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
+90,608191,"src/extension.ts",9984,0,"A",typescript,content
+91,608196,"src/extension.ts",9985,0,"",typescript,selection_keyboard
+92,608218,"src/extension.ts",9985,0,"P",typescript,content
+93,608220,"src/extension.ts",9986,0,"",typescript,selection_keyboard
+94,608231,"src/extension.ts",9986,0,"I",typescript,content
+95,608233,"src/extension.ts",9987,0,"",typescript,selection_keyboard
+96,608439,"src/extension.ts",9987,0,"=",typescript,content
+97,608442,"src/extension.ts",9988,0,"",typescript,selection_keyboard
+98,609356,"src/extension.ts",9988,0,"f",typescript,content
+99,609363,"src/extension.ts",9989,0,"",typescript,selection_keyboard
+100,609425,"src/extension.ts",9989,0,"o",typescript,content
+101,609429,"src/extension.ts",9990,0,"",typescript,selection_keyboard
+102,609547,"src/extension.ts",9990,0,"o",typescript,content
+103,609551,"src/extension.ts",9991,0,"",typescript,selection_keyboard
+104,610051,"src/extension.ts",9990,0,"",typescript,selection_command
+105,610641,"src/extension.ts",9984,7,"",typescript,content
+106,610907,"src/extension.ts",9984,0,"A",typescript,content
+107,610908,"src/extension.ts",9985,0,"",typescript,selection_keyboard
+108,610957,"src/extension.ts",9985,0,"P",typescript,content
+109,610960,"src/extension.ts",9986,0,"",typescript,selection_keyboard
+110,610983,"src/extension.ts",9986,0,"I",typescript,content
+111,610986,"src/extension.ts",9987,0,"",typescript,selection_keyboard
+112,611972,"src/extension.ts",9986,0,"",typescript,selection_command
+113,612398,"src/extension.ts",9984,0,"",typescript,selection_command
+114,613099,"src/extension.ts",9984,3,"",typescript,content
+115,613505,"src/extension.ts",9984,0,"A",typescript,content
+116,613506,"src/extension.ts",9985,0,"",typescript,selection_keyboard
+117,613558,"src/extension.ts",9985,0,"P",typescript,content
+118,613562,"src/extension.ts",9986,0,"",typescript,selection_keyboard
+119,613614,"src/extension.ts",9986,0,"I",typescript,content
+120,613618,"src/extension.ts",9987,0,"",typescript,selection_keyboard
+121,613842,"src/extension.ts",9987,0,"=",typescript,content
+122,613844,"src/extension.ts",9988,0,"",typescript,selection_keyboard
+123,614226,"src/extension.ts",9988,0,"f",typescript,content
+124,614230,"src/extension.ts",9989,0,"",typescript,selection_keyboard
+125,614281,"src/extension.ts",9989,0,"o",typescript,content
+126,614414,"src/extension.ts",9990,0,"o",typescript,content
+127,614418,"src/extension.ts",9991,0,"",typescript,selection_keyboard
+128,614676,"src/extension.ts",9990,0,"",typescript,selection_command
+129,615398,"src/extension.ts",9984,7,"",typescript,content
+130,632977,"src/extension.ts",9984,0,"\n",typescript,content
+160,689584,"src/extension.ts",9984,0,"\n",typescript,content
+161,689722,"src/extension.ts",9985,0,"\n",typescript,content
+162,689854,"src/extension.ts",9986,0,"\n",typescript,content
+163,689985,"src/extension.ts",9987,0,"\n",typescript,content
+164,690120,"src/extension.ts",9988,0,"\n",typescript,content
+165,690244,"src/extension.ts",9989,0,"\n",typescript,content
+195,720758,"src/extension.ts",9990,1,"",typescript,content
+196,720908,"src/extension.ts",9989,1,"",typescript,content
+197,721051,"src/extension.ts",9988,1,"",typescript,content
+198,721198,"src/extension.ts",9987,1,"",typescript,content
+199,721347,"src/extension.ts",9986,1,"",typescript,content
+200,721495,"src/extension.ts",9985,1,"",typescript,content
+201,721627,"src/extension.ts",9984,1,"",typescript,content
+202,721775,"src/extension.ts",9984,0,"\n",typescript,content
+203,721906,"src/extension.ts",9985,0,"\n",typescript,content
+204,722150,"src/extension.ts",9986,0,"\n",typescript,content
+205,722190,"src/extension.ts",9987,0,"\n",typescript,content
+206,722219,"src/extension.ts",9988,0,"\n",typescript,content
+207,722259,"src/extension.ts",9989,0,"\n",typescript,content
+208,722283,"src/extension.ts",9990,0,"\n",typescript,content
+209,722320,"src/extension.ts",9991,0,"\n",typescript,content
+210,722350,"src/extension.ts",9992,0,"\n",typescript,content
+211,722387,"src/extension.ts",9993,0,"\n",typescript,content
+212,722421,"src/extension.ts",9994,0,"\n",typescript,content
+213,722454,"src/extension.ts",9995,0,"\n",typescript,content
+214,722489,"src/extension.ts",9996,0,"\n",typescript,content
+215,722520,"src/extension.ts",9997,0,"\n",typescript,content
+216,722557,"src/extension.ts",9998,0,"\n",typescript,content
+217,722587,"src/extension.ts",9999,0,"\n",typescript,content
+218,722622,"src/extension.ts",10000,0,"\n",typescript,content
+219,722654,"src/extension.ts",10001,0,"\n",typescript,content
+220,722688,"src/extension.ts",10002,0,"\n",typescript,content
+221,722800,"src/extension.ts",10002,1,"",typescript,content
+222,723054,"src/extension.ts",10001,1,"",typescript,content
+223,723084,"src/extension.ts",10000,1,"",typescript,content
+224,723119,"src/extension.ts",9999,1,"",typescript,content
+254,744309,"src/extension.ts",9990,1,"",typescript,content
+255,744454,"src/extension.ts",9989,1,"",typescript,content
+256,744592,"src/extension.ts",9988,1,"",typescript,content
+257,744729,"src/extension.ts",9987,1,"",typescript,content
+258,744874,"src/extension.ts",9986,1,"",typescript,content
+259,745025,"src/extension.ts",9985,1,"",typescript,content
+260,745342,"src/extension.ts",9984,1,"",typescript,content
+261,745611,"src/extension.ts",9984,0,"\n",typescript,content
+262,745787,"src/extension.ts",9985,0,"\n",typescript,content
+263,745943,"src/extension.ts",9986,0,"\n",typescript,content
+264,746077,"src/extension.ts",9987,0,"\n",typescript,content
+265,746223,"src/extension.ts",9988,0,"\n",typescript,content
+266,746357,"src/extension.ts",9989,0,"\n",typescript,content
+267,746479,"src/extension.ts",9990,0,"\n",typescript,content
+268,746689,"src/extension.ts",9990,1,"",typescript,content
+269,746852,"src/extension.ts",9989,1,"",typescript,content
+299,1440083,"src/extension.ts",9984,0,"\n",typescript,content
+300,1440491,"src/extension.ts",9985,0,"\n",typescript,content
+301,1440637,"src/extension.ts",9986,0,"\n",typescript,content
+302,1440791,"src/extension.ts",9987,0,"\n",typescript,content
+303,1440934,"src/extension.ts",9988,0,"\n",typescript,content
+304,1441072,"src/extension.ts",9989,0,"\n",typescript,content
+305,1441279,"src/extension.ts",9989,1,"",typescript,content
+306,1441454,"src/extension.ts",9988,1,"",typescript,content
+307,1441595,"src/extension.ts",9987,1,"",typescript,content
+308,1441758,"src/extension.ts",9986,1,"",typescript,content
+309,1441888,"src/extension.ts",9985,1,"",typescript,content
+310,1442037,"src/extension.ts",9984,1,"",typescript,content
+311,1474476,"src/extension.ts",9984,0,"\n",typescript,content
+341,1511212,"src/extension.ts",9983,0,"\n",typescript,content
+342,1511454,"src/extension.ts",9984,0,"\n",typescript,content
+343,1511587,"src/extension.ts",9985,0,"\n",typescript,content
+344,1511731,"src/extension.ts",9986,0,"\n",typescript,content
+345,1511864,"src/extension.ts",9987,0,"\n",typescript,content
+346,1511996,"src/extension.ts",9988,0,"\n",typescript,content
+347,1512115,"src/extension.ts",9989,0,"\n",typescript,content
+348,1512251,"src/extension.ts",9990,0,"\n",typescript,content
+349,1512384,"src/extension.ts",9991,0,"\n",typescript,content
+379,1535334,"src/extension.ts",9983,0,"\n",typescript,content
+380,1535574,"src/extension.ts",9984,0,"\n",typescript,content
+381,1535825,"src/extension.ts",9985,0,"\n",typescript,content
+382,1535861,"src/extension.ts",9986,0,"\n",typescript,content
+383,1535890,"src/extension.ts",9987,0,"\n",typescript,content
+384,1535926,"src/extension.ts",9988,0,"\n",typescript,content
+414,1662008,"src/extension.ts",9983,0,"\n",typescript,content
+415,1662468,"src/extension.ts",9984,0,"\n",typescript,content
+416,1662714,"src/extension.ts",9985,0,"\n",typescript,content
+417,1662748,"src/extension.ts",9986,0,"\n",typescript,content
+418,1662778,"src/extension.ts",9987,0,"\n",typescript,content
+419,1662813,"src/extension.ts",9988,0,"\n",typescript,content
+420,1662845,"src/extension.ts",9989,0,"\n",typescript,content
+421,1662880,"src/extension.ts",9990,0,"\n",typescript,content
+422,1662913,"src/extension.ts",9991,0,"\n",typescript,content
+423,1662949,"src/extension.ts",9992,0,"\n",typescript,content
+424,1663179,"src/extension.ts",9993,0,"\n",typescript,content
+425,1663429,"src/extension.ts",9993,1,"",typescript,content
+426,1663685,"src/extension.ts",9992,1,"",typescript,content
+427,1663709,"src/extension.ts",9991,1,"",typescript,content
+428,1663747,"src/extension.ts",9990,1,"",typescript,content
+429,1663775,"src/extension.ts",9989,1,"",typescript,content
+430,1663813,"src/extension.ts",9988,1,"",typescript,content
+431,1663842,"src/extension.ts",9987,1,"",typescript,content
+432,1664057,"src/extension.ts",9986,1,"",typescript,content
+433,1664229,"src/extension.ts",9985,1,"",typescript,content
+434,1664513,"src/extension.ts",9984,1,"",typescript,content
+435,1674303,"src/extension.ts",9984,0,"K",typescript,content
+436,1674306,"src/extension.ts",9985,0,"",typescript,selection_keyboard
+437,1674445,"src/extension.ts",9985,0,"E",typescript,content
+438,1674448,"src/extension.ts",9986,0,"",typescript,selection_keyboard
+439,1675238,"src/extension.ts",9986,0,"Y",typescript,content
+440,1675248,"src/extension.ts",9987,0,"",typescript,selection_keyboard
+441,1675504,"src/extension.ts",9987,0,"=",typescript,content
+442,1675506,"src/extension.ts",9988,0,"",typescript,selection_keyboard
+443,1675690,"src/extension.ts",9988,0,"f",typescript,content
+444,1675691,"src/extension.ts",9989,0,"",typescript,selection_keyboard
+445,1675750,"src/extension.ts",9989,0,"o",typescript,content
+446,1675754,"src/extension.ts",9990,0,"",typescript,selection_keyboard
+447,1675900,"src/extension.ts",9990,0,"o",typescript,content
+448,1675904,"src/extension.ts",9991,0,"",typescript,selection_keyboard
+449,1676387,"src/extension.ts",9990,0,"",typescript,selection_command
+450,1676937,"src/extension.ts",9983,8,"",typescript,content
4de8d861ed2563988d5f1871647ebc5fe70861b32d24a4b32f9363518653a328/crowd-code-fcaaf520-6a1e-40c5-9a72-85ae7ad4ab0b1750621325310-2025_06_22-12.42.08.659/source.csv
ADDED
@@ -0,0 +1,5 @@
+Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
+1,14,"utils/dataloader.py",0,0,"from cgi import test\nimport functools\nimport jax\n\nimport tensorflow as tf\n\n\n# --- TensorFlow function for processing: slicing, normalization ---\ndef _tf_process_episode(episode_tensor, seq_len, image_h, image_w, image_c):\n """"""\n Processes a raw episode tensor in TensorFlow.\n Takes a full episode, extracts a random sequence, and normalizes it.\n Args:\n episode_tensor: A TensorFlow tensor representing a full video episode.\n Expected shape: (dynamic_length, image_h, image_w, image_c)\n Expected dtype: e.g., tf.uint8 (raw pixel values)\n seq_len: The desired length of the sub-sequence to extract.\n image_h: The height of each frame.\n image_w: The width of each frame.\n image_c: The number of channels in each frame.\n Returns:\n A TensorFlow tensor representing the processed video sequence.\n Shape: (seq_len, image_h, image_w, image_c)\n Dtype: tf.float32 (normalized pixel values)\n """"""\n current_episode_len = tf.shape(episode_tensor)[0]\n\n max_start_idx = current_episode_len - seq_len\n\n start_idx = tf.random.uniform(\n shape=(), minval=0, maxval=max_start_idx + 1, dtype=tf.int32\n )\n\n seq = episode_tensor[start_idx : start_idx + seq_len]\n\n seq = tf.cast(seq, tf.float32) / 255.0\n\n # Ensure the final shape is statically known for batching.\n # tf.reshape is robust, but tf.ensure_shape or set_shape can also be used if confident.\n processed_sequence = tf.reshape(seq, [seq_len, image_h, image_w, image_c])\n\n return processed_sequence\n\n\ndef _parse_tfrecord_fn(example_proto, image_h, image_w, image_c):\n feature_description = {\n ""height"": tf.io.FixedLenFeature([], tf.int64),\n ""width"": tf.io.FixedLenFeature([], tf.int64),\n ""channels"": tf.io.FixedLenFeature([], tf.int64),\n ""sequence_length"": tf.io.FixedLenFeature([], tf.int64),\n ""raw_video"": tf.io.FixedLenFeature([], tf.string),\n }\n example = tf.io.parse_single_example(example_proto, feature_description)\n\n video_shape = (example[""sequence_length""], image_h, image_w, image_c)\n\n episode_tensor = tf.io.decode_raw(example[""raw_video""], out_type=tf.uint8)\n episode_tensor = tf.reshape(episode_tensor, video_shape)\n\n episode_tensor = tf.ensure_shape(episode_tensor, [None, image_h, image_w, image_c])\n return episode_tensor\n\n\ndef get_dataloader(\n tfrecord_paths: list[str], # List of TFRecord file paths\n seq_len: int,\n global_batch_size: int,\n image_h: int,\n image_w: int,\n image_c: int,\n shuffle_buffer_size: int = 1000,\n num_parallel_calls: int = tf.data.AUTOTUNE,\n cache_processed_data: bool = True,\n seed: int = 42,\n):\n """"""\n Creates a tf.data.Dataset pipeline from TFRecord files.\n """"""\n if not tfrecord_paths:\n raise ValueError(""tfrecord_paths list cannot be empty."")\n\n process_id = jax.process_index()\n num_processes = jax.process_count()\n\n assert global_batch_size % num_processes == 0, ""Global batch size {global_batch_size} \\n must be divisible by the number of JAX processes {num_processes} for proper sharding.""\n per_process_batch_size = global_batch_size // num_processes\n\n dataset = tf.data.TFRecordDataset(\n tfrecord_paths, num_parallel_reads=tf.data.AUTOTUNE\n )\n \n dataset = dataset.shard(num_shards=num_processes, index=process_id)\n\n # (f.srambical) NOTE: For TFRecords, it's often good to have a large shuffle buffer.\n if shuffle_buffer_size > 0:\n dataset = dataset.shuffle(\n buffer_size=shuffle_buffer_size, seed=seed, reshuffle_each_iteration=True\n )\n parse_fn = functools.partial(\n _parse_tfrecord_fn, image_h=image_h, image_w=image_w, image_c=image_c\n )\n dataset = dataset.map(parse_fn, num_parallel_calls=num_parallel_calls)\n\n dataset = dataset.cache() if cache_processed_data else dataset\n\n tf_process_fn = functools.partial(\n _tf_process_episode,\n seq_len=seq_len,\n image_h=image_h,\n image_w=image_w,\n image_c=image_c,\n )\n dataset = dataset.map(tf_process_fn, num_parallel_calls=num_parallel_calls)\n\n dataset = dataset.repeat(None)\n dataset = dataset.batch(per_process_batch_size, drop_remainder=True)\n dataset = dataset.prefetch(tf.data.AUTOTUNE)\n\n return dataset.as_numpy_iterator()\n",python,tab
+2,27,"tasks",0,0,"",Log,tab
+3,35,"utils/dataloader.py",0,0,"",python,tab
+4,627510,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"12:42:08 PM [info] Activating crowd-code\n12:42:08 PM [info] Recording started\n",Log,tab
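The first row of this recording shows that an event of type "tab" can carry a file's entire buffer contents in its Text field when the file is opened, while "content" events encode incremental edits. Below is a hedged sketch of replaying such events to reconstruct a buffer, reusing the illustrative SourceEvent record from the earlier parsing sketch; it assumes each "content" row is a splice that replaces RangeLength characters at RangeOffset with Text (the shape of VS Code's TextDocumentContentChangeEvent), which matches the insert/delete patterns visible in the rows above.

function unescapeText(raw: string): string {
    // The CSV stores newlines as the two-character escape sequence \n.
    return raw.replace(/\\n/g, "\n");
}

function replayFile(events: SourceEvent[], file: string): string {
    let doc = "";
    for (const ev of events) {
        if (ev.file !== file) continue;
        if (ev.type === "tab" && ev.text.length > 0) {
            // Opening a tab can snapshot the whole buffer at once.
            doc = unescapeText(ev.text);
        } else if (ev.type === "content") {
            // Splice: replace rangeLength characters at rangeOffset with text.
            const text = unescapeText(ev.text);
            doc = doc.slice(0, ev.rangeOffset) + text + doc.slice(ev.rangeOffset + ev.rangeLength);
        }
        // selection_* events only move the cursor and do not change the buffer.
    }
    return doc;
}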
927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-02cb4c77-70ba-4c2a-bfdb-bd7c7d66767f1752013690963-2025_07_09-00.29.05.866/source.csv
ADDED
The diff for this file is too large to render.
See raw diff