Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-03c66d73-198a-4dbe-97c8-033faa017f891753179432483-2025_07_22-12.17.22.136/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-05830693-fec2-4daa-bf9a-df97d3f440b31752570818732-2025_07_15-11.14.44.738/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-0d5e2cbe-83a2-48a0-b2d9-a8e41912bfb61753114173405-2025_07_21-18.09.45.514/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-15e0d6d5-3ae8-440c-b7a5-a1cb5029f9c91754397775073-2025_08_05-14.43.03.950/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-2a9bf505-97e2-485f-ae8a-8a5d3e22aceb1753782197393-2025_07_29-11.43.57.848/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-2ce89d07-b6c0-4b91-ae17-a146e664ff651753363875203-2025_07_24-15.31.17.845/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-2f5b13c7-61f7-4340-b581-9edac6a53f1f1753015255059-2025_07_20-14.41.09.478/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-400c1369-388a-4663-9475-37c30815fb401752229095376-2025_07_11-12.18.28.454/source.csv +233 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-41b294b4-b89c-4c1d-8a02-14afc9168dc41753085667665-2025_07_21-10.15.04.628/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-46a4cc9d-ac37-44ff-ae8d-547db76d96f31752072213286-2025_07_09-16.43.57.848/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-4719c5f9-1b15-4792-8afd-690761108bda1751617825355-2025_07_04-10.31.22.581/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-55c207a6-ee6d-464d-8f77-e1220855a4f41754396844541-2025_08_05-14.27.33.594/source.csv +81 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-59af9530-ed37-4620-9980-6c646b3d58821751599132847-2025_07_04-05.19.59.945/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-5b1a6152-1602-4538-a4b1-6fa9507221151753212707189-2025_07_22-21.32.36.855/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-6829bcbf-f7fb-4481-92ea-521e9af7eabb1754058671446-2025_08_01-16.31.17.606/source.csv +4 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-6a683910-8d55-4299-9066-894bbed6c97c1754399347661-2025_08_05-15.09.15.958/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-6cc07fef-c3ba-4457-9c7c-5fe5347d53561751545506463-2025_07_03-14.26.06.366/source.csv +283 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-823236e5-d9b8-4c96-ab4a-24b8972648001754120306348-2025_08_02-09.38.34.668/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-847ad052-cbff-45db-a343-a2c6b2d212411753383716687-2025_07_24-21.02.30.722/source.csv +24 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-895267d6-5fbc-45e8-bc56-0d7c756881181750708632303-2025_06_23-12.57.13.921/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-8a8d866a-4152-4c47-8a5f-a8a6a4c71f3e1753768819993-2025_07_29-08.00.29.05/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-a775c5ce-801a-4b55-897a-6c0b6f3448081754127102402-2025_08_02-11.31.50.147/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-ac1e2da2-a2d2-4327-aaa3-5900bc2b3a561753469517123-2025_07_25-20.52.05.763/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-b08d92a3-9c0a-4526-b12f-c973e9c3c43f1752071802867-2025_07_09-16.36.43.962/source.csv +267 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-b2ff9c32-0980-46be-b0f1-51dff76665011752660866284-2025_07_16-12.14.34.814/source.csv +10 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-cc98355f-a627-4bb1-8368-cc01d32ba9141754309337509-2025_08_04-14.09.04.547/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-cdede756-87c5-47af-85f8-bc9bf1c41bac1750785125700-2025_06_24-22.05.07.988/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-d5ccbc8d-af3a-4c10-ab81-61e24d56bad31754400813118-2025_08_05-15.33.50.504/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-d9c3341f-490f-4227-8214-c68384e56a1f1753945239481-2025_07_31-09.00.53.845/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-dca3235c-6967-4aac-8b57-97fa2f10e0ad1753259782320-2025_07_23-10.36.33.63/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-df82217c-210f-4b7f-897f-6f53642cf7c81754033699441-2025_08_01-09.35.08.848/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-e7d20f74-415c-47d0-ad95-3f6da31696d51753194904459-2025_07_22-16.35.52.74/source.csv +0 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-f0382786-979c-4a6d-8e9b-f5977f18eb4f1753726151187-2025_08_02-06.58.58.573/source.csv +4 -0
- 1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-f23be3cf-4da5-450c-91f1-df9de045459c1752656830830-2025_07_16-11.08.01.978/source.csv +0 -0
- 927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-000d5684-56eb-441c-a6df-7ac4df8ff5c71752846982966-2025_07_18-15.57.40.939/source.csv +0 -0
- 927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-005af936-919b-4c44-b3af-2ed3459192121752831699789-2025_07_18-11.43.38.119/source.csv +0 -0
- 927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-05016444-b54b-4934-b340-97e6db49021a1753717457401-2025_07_28-17.45.12.572/source.csv +0 -0
- 927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-08c8f2de-5ecb-431d-b59f-5255a2b202341751712939209-2025_07_05-12.56.07.377/source.csv +0 -0
- 927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-08ecc06e-32e0-4a9b-b785-5fb84b3f32de1751802304467-2025_07_06-13.46.47.722/source.csv +0 -0
- 927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-12b522dd-8518-4c62-b207-ca1ed4ce90571752782954186-2025_07_17-22.10.14.626/source.csv +60 -0
- 927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-12e65989-9ed0-4f2d-9e6c-43231178a0e21751536307695-2025_07_03-11.53.05.72/source.csv +0 -0
- 927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-1dc733b8-f415-4be5-b7dd-dc5953da5bb91753973887840-2025_07_31-16.58.50.401/source.csv +0 -0
- 927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-21374577-8901-4de5-b364-53d577190c6a1752060363988-2025_07_09-13.26.43.890/source.csv +0 -0
- 927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-25569aaa-6e77-4ce2-b9b6-8ae8c33420051753180192494-2025_07_22-12.30.11.399/source.csv +16 -0
- 927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-268e2d5f-0a66-4008-8495-15de70c8a2e51751028407664-2025_06_27-14.47.06.44/source.csv +0 -0
- 927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-2d2437e1-caa5-4315-a7d9-4d9478073a161750944609503-2025_06_26-15.30.55.51/source.csv +0 -0
- 927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-2f5e552b-d86c-4a34-a644-139d05fcf0731753100718217-2025_07_21-14.25.46.738/source.csv +0 -0
- 927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-30efafeb-a59f-45ff-9626-651f3a2526631753351953527-2025_07_24-12.13.09.956/source.csv +0 -0
- 927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-35296db0-5985-4518-8394-cca2184ed6c11751305990184-2025_06_30-19.53.48.49/source.csv +0 -0
- 927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-3553d16e-f1c9-4e9c-9425-6b663caf1f311753957765078-2025_07_31-12.30.02.749/source.csv +0 -0
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-03c66d73-198a-4dbe-97c8-033faa017f891753179432483-2025_07_22-12.17.22.136/source.csv
ADDED
The diff for this file is too large to render.
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-05830693-fec2-4daa-bf9a-df97d3f440b31752570818732-2025_07_15-11.14.44.738/source.csv
ADDED
The diff for this file is too large to render.
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-0d5e2cbe-83a2-48a0-b2d9-a8e41912bfb61753114173405-2025_07_21-18.09.45.514/source.csv
ADDED
The diff for this file is too large to render.
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-15e0d6d5-3ae8-440c-b7a5-a1cb5029f9c91754397775073-2025_08_05-14.43.03.950/source.csv
ADDED
The diff for this file is too large to render.
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-2a9bf505-97e2-485f-ae8a-8a5d3e22aceb1753782197393-2025_07_29-11.43.57.848/source.csv
ADDED
The diff for this file is too large to render.
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-2ce89d07-b6c0-4b91-ae17-a146e664ff651753363875203-2025_07_24-15.31.17.845/source.csv
ADDED
The diff for this file is too large to render.
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-2f5b13c7-61f7-4340-b581-9edac6a53f1f1753015255059-2025_07_20-14.41.09.478/source.csv
ADDED
The diff for this file is too large to render.
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-400c1369-388a-4663-9475-37c30815fb401752229095376-2025_07_11-12.18.28.454/source.csv
ADDED
@@ -0,0 +1,233 @@
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
1,3,"experiments/tokenizer_optimal_batch_size.sh",0,0,"#!/usr/bin/env bash\nsource .venv/bin/activate\n\ndata_dir=""$PWD/data_arrayrecord/dummy""\nckpt_dir=""$PWD/checkpoints/tokenizer_openai_grain_checkpointing""\n\nexport XLA_FLAGS=--xla_gpu_autotune_level=0\nsrun python train_tokenizer.py \\n --batch_size 12 \\n --ckpt_dir $ckpt_dir \\n --num_steps 300000 \\n --warmup_steps 10000 \\n --seed 0 \\n --min_lr=0.0000866 \\n --max_lr=0.0000866 \\n --data_dir $data_dir",shellscript,tab
2,862,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"12:18:28 PM [info] Activating crowd-code\n12:18:28 PM [info] Recording started\n12:18:28 PM [info] Initializing git provider using file system watchers...\n12:18:29 PM [info] Git repository found\n12:18:29 PM [info] Git provider initialized successfully\n12:18:29 PM [info] Initial git state: [object Object]\n",Log,tab
3,2102,"experiments/tokenizer_optimal_batch_size.sh",0,0,"",shellscript,tab
4,8512,"TERMINAL",0,0,"squeue",,terminal_command
5,8563,"TERMINAL",0,0,"[?25l[?2004l\r]633;E;squeue;8d3c1446-1b1d-4023-8f8d-e68e552459ae]633;C[?25h",,terminal_output
6,25189,"TERMINAL",0,0,"salloc --gpus=4 --ntasks-per-node=4 --cpus-per-task=1 --mem=100G",,terminal_command
7,25260,"TERMINAL",0,0,"[?25l[1;29H\r]633;A[franz.srambical@[31;47;1mhai-login2.haicore.berlin[m:~/jafar] $ ]633;Bsalloc --gpus=4 --ntasks-per-node=4 --cpus-per-task=1 --mem=100G[A\r]633;A[franz.srambical@[31;47;1mhai-login2.haicore.berlin[m:~/jafar] $ ]633;B\r\n\r\r\n[?2004l\r]633;E;salloc --gpus=4 --ntasks-per-node=4 --cpus-per-task=1 --mem=100G;8d3c1446-1b1d-4023-8f8d-e68e552459ae]633;C[?25hsalloc: Granted job allocation 13338\r\n",,terminal_output
8,25684,"TERMINAL",0,0,"salloc: Nodes hai002 are ready for job\r\n",,terminal_output
9,25771,"TERMINAL",0,0,"Running inside SLURM, Job ID 13338.\r\n]0;franz.srambical@hai-login2:~/jafar[?2004h[[email protected]:~/jafar] $ ",,terminal_output
10,27256,"TERMINAL",0,0,"exit",,terminal_output
11,28213,"TERMINAL",0,0,"[?25l[6;55He[?25h",,terminal_output
12,29203,"TERMINAL",0,0,"\r\n[?2004l\rbash: exite: command not found\r\n]0;franz.srambical@hai-login2:~/jafar[?2004h[[email protected]:~/jafar] $ ",,terminal_output
13,30079,"TERMINAL",0,0,"exite[K",,terminal_output
14,30263,"TERMINAL",0,0,"e",,terminal_output
15,30475,"TERMINAL",0,0,"[?25l[8;52Hx[8;54H[?25h",,terminal_output
16,30561,"TERMINAL",0,0,"[?25l[8;53Hi[8;55H[?25h",,terminal_output
17,30724,"TERMINAL",0,0,"[?25l[8;54Ht[9;1H[?25h",,terminal_output
18,30970,"TERMINAL",0,0,"[?25l[?2004l\rexit\r\n[?25hsrun: error: hai002: task 0: Exited with exit code 127\r\nsalloc: Relinquishing job allocation 13338\r\n]0;franz.srambical@hai-login2:~/jafar]633;D;127]633;P;Cwd=/home/franz.srambical/jafar",,terminal_output
19,45408,"TERMINAL",0,0,"salloc --gpus=8 --ntasks-per-node=4 --cpus-per-task=1 --mem=100G",,terminal_command
20,45451,"TERMINAL",0,0,"[?25l\r\r\n[?2004l\r]633;E;salloc --gpus=8 --ntasks-per-node=4 --cpus-per-task=1 --mem=100G;8d3c1446-1b1d-4023-8f8d-e68e552459ae]633;C[?25hsalloc: Granted job allocation 13339\r\n",,terminal_output
21,45818,"TERMINAL",0,0,"salloc: Nodes hai002 are ready for job\r\nRunning inside SLURM, Job ID 13339.\r\n]0;franz.srambical@hai-login2:~/jafar[?2004h[[email protected]:~/jafar] $ ",,terminal_output
22,47437,"TERMINAL",0,0,"[?25le[2mx[22m[2mi[22m[17;54H[?25h[?25l[17;52Hx[17;54H[?25h[?25l[17;53Hi[17;54H[?25h",,terminal_output
23,47884,"TERMINAL",0,0,"[?25l[17;54Ht[18;1H[?25h",,terminal_output
24,47931,"TERMINAL",0,0,"[?25l[?2004l\rexit\r\nsalloc: Relinquishing job allocation 13339\r\n[?25h",,terminal_output
25,58411,"TERMINAL",0,0,"salloc --gpus=8 --cpus-per-task=1 --mem=100G",,terminal_command
26,58424,"TERMINAL",0,0,"\r\n[?2004l\r]633;E;salloc --gpus=8 --cpus-per-task=1 --mem=100G;8d3c1446-1b1d-4023-8f8d-e68e552459ae]633;Csalloc: Granted job allocation 13340\r\n",,terminal_output
27,58433,"TERMINAL",0,0,"salloc: Nodes hai002 are ready for job\r\n",,terminal_output
28,58572,"TERMINAL",0,0,"Running inside SLURM, Job ID 13340.\r\n]0;franz.srambical@hai-login2:~/jafar[?2004h[[email protected]:~/jafar] $ ",,terminal_output
29,61650,"TERMINAL",0,0,"[?25ls[2mr[22m[2mu[22m[25;54H[?25h",,terminal_output
30,61803,"TERMINAL",0,0,"[?25l[25;52Hr[25;53Hu[25;56H[?25h[?25l[25;54Hn[25;55H [25;56H[?25h",,terminal_output
31,66115,"TERMINAL",0,0,"[?25l[25;56H-[25;57H[?25h",,terminal_output
32,66370,"TERMINAL",0,0,"[?25l[25;57H-[25;59H[?25h[?25l[25;58Hn[25;59H[?25h",,terminal_output
33,66419,"TERMINAL",0,0,"[?25l[25;59Ht[25;61H[?25h",,terminal_output
34,66544,"TERMINAL",0,0,"[?25l[25;60Ha[25;62H[?25h[?25l[25;61Hs[25;62H[?25h",,terminal_output
35,66670,"TERMINAL",0,0,"[?25l[25;62Hk[25;63H[?25h",,terminal_output
36,66915,"TERMINAL",0,0,"[?25l[25;63Hs[25;64H[?25h",,terminal_output
37,68509,"TERMINAL",0,0,"[?25l[25;64H=[25;65H[?25h",,terminal_output
38,68723,"TERMINAL",0,0,"[?25l[25;65H9[25;67H[?25h[?25l[25;66H [25;67H[?25h",,terminal_output
39,69523,"TERMINAL",0,0,"[?25l[25;65H8[25;66H[?25h",,terminal_output
40,69623,"TERMINAL",0,0,"[?25l[25;66H [25;67H[?25h",,terminal_output
41,76724,"experiments/tokenizer_optimal_batch_size.sh",220,0,"",shellscript,selection_command
42,77004,"experiments/tokenizer_optimal_batch_size.sh",208,0,"",shellscript,selection_command
43,77140,"experiments/tokenizer_optimal_batch_size.sh",201,0,"",shellscript,selection_command
44,77260,"experiments/tokenizer_optimal_batch_size.sh",196,0,"",shellscript,selection_command
45,77780,"experiments/tokenizer_optimal_batch_size.sh",199,0,"",shellscript,selection_command
46,78055,"experiments/tokenizer_optimal_batch_size.sh",200,0,"",shellscript,selection_command
47,78141,"experiments/tokenizer_optimal_batch_size.sh",200,0," ",shellscript,content
48,78141,"experiments/tokenizer_optimal_batch_size.sh",201,0,"",shellscript,selection_keyboard
49,80576,"experiments/tokenizer_optimal_batch_size.sh",200,1,"",shellscript,content
50,80683,"experiments/tokenizer_optimal_batch_size.sh",199,0,"",shellscript,selection_command
51,89691,"experiments/tokenizer_grain_checkpointing.sh",0,0,"#!/usr/bin/env bash\nsource .venv/bin/activate\n\ndata_dir=""$PWD/data_arrayrecord/dummy""\nckpt_dir=""$PWD/checkpoints/tokenizer_openai_grain_checkpointing""\n\nexport XLA_FLAGS=--xla_gpu_autotune_level=0\nsrun python train_tokenizer.py \\n --batch_size 48 \\n --restore_ckpt \\n --save_ckpt \\n --log_checkpoint_interval 5 \\n --ckpt_dir $ckpt_dir \\n --num_steps 300000 \\n --warmup_steps 10000 \\n --seed 0 \\n --min_lr=0.0000866 \\n --max_lr=0.0000866 \\n --data_dir $data_dir",shellscript,tab
52,92956,"experiments/tokenizer_grain_checkpointing.sh",466,0,"",shellscript,selection_command
53,93385,"experiments/tokenizer_grain_checkpointing.sh",441,0,"",shellscript,selection_command
54,93386,"experiments/tokenizer_grain_checkpointing.sh",416,0,"",shellscript,selection_command
55,93386,"experiments/tokenizer_grain_checkpointing.sh",399,0,"",shellscript,selection_command
56,93389,"experiments/tokenizer_grain_checkpointing.sh",374,0,"",shellscript,selection_command
57,93389,"experiments/tokenizer_grain_checkpointing.sh",347,0,"",shellscript,selection_command
58,93399,"experiments/tokenizer_grain_checkpointing.sh",313,0,"",shellscript,selection_command
59,93399,"experiments/tokenizer_grain_checkpointing.sh",288,0,"",shellscript,selection_command
60,93443,"experiments/tokenizer_grain_checkpointing.sh",270,0,"",shellscript,selection_command
61,93563,"experiments/tokenizer_grain_checkpointing.sh",249,0,"",shellscript,selection_command
62,93852,"experiments/tokenizer_grain_checkpointing.sh",246,0,"",shellscript,selection_command
63,99523,"experiments/tokenizer_grain_checkpointing.sh",246,2,"",shellscript,content
64,100143,"experiments/tokenizer_grain_checkpointing.sh",246,0,"9",shellscript,content
65,100144,"experiments/tokenizer_grain_checkpointing.sh",247,0,"",shellscript,selection_keyboard
66,100232,"experiments/tokenizer_grain_checkpointing.sh",247,0,"6",shellscript,content
67,100232,"experiments/tokenizer_grain_checkpointing.sh",248,0,"",shellscript,selection_keyboard
68,100483,"experiments/tokenizer_grain_checkpointing.sh",247,0,"",shellscript,selection_command
69,104563,"experiments/tokenizer_grain_checkpointing.sh",214,0,"",shellscript,selection_command
70,104683,"experiments/tokenizer_grain_checkpointing.sh",208,0,"",shellscript,selection_command
71,104811,"experiments/tokenizer_grain_checkpointing.sh",201,0,"",shellscript,selection_command
72,106363,"experiments/tokenizer_grain_checkpointing.sh",201,0,"-",shellscript,content
73,106364,"experiments/tokenizer_grain_checkpointing.sh",202,0,"",shellscript,selection_keyboard
74,106364,"experiments/tokenizer_grain_checkpointing.sh",202,0,"-",shellscript,content
75,106364,"experiments/tokenizer_grain_checkpointing.sh",203,0,"",shellscript,selection_keyboard
76,106364,"experiments/tokenizer_grain_checkpointing.sh",203,0,"n",shellscript,content
77,106365,"experiments/tokenizer_grain_checkpointing.sh",204,0,"",shellscript,selection_keyboard
78,106365,"experiments/tokenizer_grain_checkpointing.sh",204,0,"t",shellscript,content
79,106365,"experiments/tokenizer_grain_checkpointing.sh",205,0,"",shellscript,selection_keyboard
80,106443,"experiments/tokenizer_grain_checkpointing.sh",205,0,"a",shellscript,content
81,106444,"experiments/tokenizer_grain_checkpointing.sh",206,0,"",shellscript,selection_keyboard
82,106539,"experiments/tokenizer_grain_checkpointing.sh",206,0,"s",shellscript,content
83,106539,"experiments/tokenizer_grain_checkpointing.sh",207,0,"",shellscript,selection_keyboard
84,107223,"experiments/tokenizer_grain_checkpointing.sh",207,0,"k",shellscript,content
85,107224,"experiments/tokenizer_grain_checkpointing.sh",208,0,"",shellscript,selection_keyboard
86,107311,"experiments/tokenizer_grain_checkpointing.sh",208,0,"s",shellscript,content
87,107312,"experiments/tokenizer_grain_checkpointing.sh",209,0,"",shellscript,selection_keyboard
88,108179,"experiments/tokenizer_grain_checkpointing.sh",209,0,"=",shellscript,content
89,108179,"experiments/tokenizer_grain_checkpointing.sh",210,0,"",shellscript,selection_keyboard
90,108400,"experiments/tokenizer_grain_checkpointing.sh",210,0,"8",shellscript,content
91,108400,"experiments/tokenizer_grain_checkpointing.sh",211,0,"",shellscript,selection_keyboard
92,108771,"experiments/tokenizer_grain_checkpointing.sh",210,0,"",shellscript,selection_command
93,108923,"experiments/tokenizer_grain_checkpointing.sh",211,0,"",shellscript,selection_command
94,108987,"experiments/tokenizer_grain_checkpointing.sh",211,0," ",shellscript,content
95,108987,"experiments/tokenizer_grain_checkpointing.sh",212,0,"",shellscript,selection_keyboard
96,109163,"experiments/tokenizer_grain_checkpointing.sh",211,0,"",shellscript,selection_command
97,110511,"experiments/tokenizer_grain_checkpointing.sh",210,0,"",shellscript,selection_command
98,110683,"experiments/tokenizer_grain_checkpointing.sh",209,0,"",shellscript,selection_command
99,110803,"experiments/tokenizer_grain_checkpointing.sh",203,0,"",shellscript,selection_command
100,110963,"experiments/tokenizer_grain_checkpointing.sh",201,0,"",shellscript,selection_command
101,111571,"experiments/tokenizer_grain_checkpointing.sh",203,0,"",shellscript,selection_command
102,111572,"experiments/tokenizer_grain_checkpointing.sh",209,0,"",shellscript,selection_command
103,111572,"experiments/tokenizer_grain_checkpointing.sh",210,0,"",shellscript,selection_command
104,115151,"TERMINAL",0,0,"[K",,terminal_output
105,115695,"TERMINAL",0,0,"[?25l[25;51Hb[25;53H[?25h",,terminal_output
106,115763,"TERMINAL",0,0,"[?25l[25;52Ha[25;54H[?25h[?25l[25;53Hs[25;54H[?25h",,terminal_output
107,115891,"TERMINAL",0,0,"[?25l[25;54Hh[25;55H[?25h",,terminal_output
108,115995,"TERMINAL",0,0,"[?25l[25;55H [25;56H[?25h",,terminal_output
109,116611,"TERMINAL",0,0,"[?25l[25;56Hx[25;57H[?25h",,terminal_output
110,116883,"TERMINAL",0,0,"[?25l[25;56Hx[25;57H[?25h",,terminal_output
111,117263,"TERMINAL",0,0,"[?25l[25;56He[25;57H[?25h",,terminal_output
112,117443,"TERMINAL",0,0,"[?25l[25;57Hx[25;58H[?25h",,terminal_output
113,117611,"TERMINAL",0,0,"[?25l[25;58Hp[25;59H[?25h",,terminal_output
114,117763,"TERMINAL",0,0,"[?25l[25;59He[25;61H[?25h[?25l[25;60Hr[25;61H[?25h",,terminal_output
115,117923,"TERMINAL",0,0,"[?25l[25;61Hi[25;62H[?25h",,terminal_output
116,118047,"TERMINAL",0,0,"ments/",,terminal_output
117,118523,"TERMINAL",0,0,"[?25l[25;68Ht[25;69H[?25h",,terminal_output
118,118643,"TERMINAL",0,0,"[?25l[25;69Ho[25;70Hk[25;71H[?25h",,terminal_output
119,118723,"TERMINAL",0,0,"enizer_",,terminal_output
120,119899,"TERMINAL",0,0,"[?25l[25;78Hg[25;79H[?25h",,terminal_output
121,120099,"TERMINAL",0,0,"[?25l[25;79Hr[25;81H[?25h[?25l[25;80Ha[25;82H[?25h[?25l[25;81Hi[25;83H[?25h",,terminal_output
122,120191,"TERMINAL",0,0,"[?25l[25;82Hn[25;83H[?25h",,terminal_output
123,120291,"TERMINAL",0,0,"_checkpointing.sh ",,terminal_output
124,120807,"TERMINAL",0,0,"[?25l[?2004l\r[?25hsrun: error: Unable to create step for job 13340: More processors requested than permitted\r\n]0;franz.srambical@hai-login2:~/jafar[?2004h[[email protected]:~/jafar] $ ",,terminal_output
125,132623,"TERMINAL",0,0,"e",,terminal_output
126,132691,"TERMINAL",0,0,"x",,terminal_output
127,132823,"TERMINAL",0,0,"it",,terminal_output
128,134760,"TERMINAL",0,0,"\r\n[?2004l\rexit\r\nsrun: error: hai002: task 0: Exited with exit code 1\r\nsalloc: Relinquishing job allocation 13340\r\n]0;franz.srambical@hai-login2:~/jafar]633;D;1]633;P;Cwd=/home/franz.srambical/jafar",,terminal_output
129,141531,"TERMINAL",0,0,"salloc --gpus=8 --cpus=8 --mem=100G",,terminal_command
130,141532,"TERMINAL",0,0,"[?25l[?2004l\r]633;E;salloc --gpus=8 --cpus=8 --mem=100G;8d3c1446-1b1d-4023-8f8d-e68e552459ae]633;Csalloc: option '--cpus=8' is ambiguous; possibilities: '--cpus-per-gpu' '--cpus-per-task'\r\nTry ""salloc --help"" for more information\r\n]0;franz.srambical@hai-login2:~/jafar]633;D;255[?25h",,terminal_output
131,150389,"TERMINAL",0,0,"salloc --gpus=8 --cpus-per-gpu=1 --mem=100G",,terminal_command
132,150447,"TERMINAL",0,0,"[?25l\r\r\n[?2004l\r]633;E;salloc --gpus=8 --cpus-per-gpu=1 --mem=100G;8d3c1446-1b1d-4023-8f8d-e68e552459ae]633;C[?25hsalloc: Pending job allocation 13341\r\nsalloc: job 13341 queued and waiting for resources\r\n",,terminal_output
133,163548,"TERMINAL",0,0,"salloc --gpus=8 --cpus-per-task=8 --mem=100G",,terminal_command
134,163651,"TERMINAL",0,0,"[?25l\r\r\n[?2004l\r]633;E;salloc --gpus=8 --cpus-per-task=8 --mem=100G;8d3c1446-1b1d-4023-8f8d-e68e552459ae]633;C[?25hsalloc: Pending job allocation 13342\r\nsalloc: job 13342 queued and waiting for resources\r\n",,terminal_output
135,184827,"TERMINAL",0,0,"exit",,terminal_command
136,184878,"TERMINAL",0,0,"[?25l[?2004l\r]633;E;exit;8d3c1446-1b1d-4023-8f8d-e68e552459ae]633;Cexit\r\n[?25h",,terminal_output
137,186518,"TERMINAL",0,0,"",,terminal_focus
138,198439,"TERMINAL",0,0,"salloc --gpus=8 --cpus-per-task=4 --mem=100G",,terminal_command
139,198518,"TERMINAL",0,0,"[?25l\r\r\n[?2004l\r]633;E;salloc --gpus=8 --cpus-per-task=4 --mem=100G;55d01655-a60b-4820-915a-c4f2af7e0b4c]633;C[?25hsalloc: Pending job allocation 13343\r\nsalloc: job 13343 queued and waiting for resources\r\n",,terminal_output
140,210058,"TERMINAL",0,0,"salloc --gpus=8 --cpus-per-task=1 --mem=100G",,terminal_command
141,210116,"TERMINAL",0,0,"\r\n\r\r\n[?2004l\r]633;E;salloc --gpus=8 --cpus-per-task=1 --mem=100G;55d01655-a60b-4820-915a-c4f2af7e0b4c]633;Csalloc: Pending job allocation 13344\r\nsalloc: job 13344 queued and waiting for resources\r\n",,terminal_output
142,217717,"TERMINAL",0,0,"^C",,terminal_command
143,217732,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;55d01655-a60b-4820-915a-c4f2af7e0b4c]633;C",,terminal_output
144,219775,"TERMINAL",0,0,"squeue",,terminal_command
145,219776,"TERMINAL",0,0,"[?25l[1;29H\r]633;A[franz.srambical@[31;47;1mhai-login2.haicore.berlin[m:~/jafar] $ ]633;Bsqueue\r\n[?2004l\r]633;E;squeue;55d01655-a60b-4820-915a-c4f2af7e0b4c]633;C JOBID USER PARTITION NODES CPUS ST SUBMIT_TIME START_TIME TIME TIME_LIMIT NODELIST(REASON)\r\n[?25h",,terminal_output
146,231205,"TERMINAL",0,0,"salloc --gpus=8 --ntasks-per-node=4 --cpus-per-task=1 --mem=100G",,terminal_command
147,231250,"TERMINAL",0,0,"[?25l[?2004l\r]633;E;salloc --gpus=8 --ntasks-per-node=4 --cpus-per-task=1 --mem=100G;55d01655-a60b-4820-915a-c4f2af7e0b4c]633;C[?25hsalloc: Pending job allocation 13345\r\nsalloc: job 13345 queued and waiting for resources\r\n",,terminal_output
148,243682,"TERMINAL",0,0,"salloc: job 13345 has been allocated resources\r\nsalloc: Granted job allocation 13345\r\n",,terminal_output
149,243813,"TERMINAL",0,0,"salloc: Nodes hai002 are ready for job\r\n",,terminal_output
150,244111,"TERMINAL",0,0,"Running inside SLURM, Job ID 13345.\r\n",,terminal_output
151,244202,"TERMINAL",0,0,"]0;franz.srambical@hai-login2:~/jafar[?2004h[[email protected]:~/jafar] $ ",,terminal_output
152,374723,"TERMINAL",0,0,"[?25ls[2mr[22m[18;53H[?25h[?25l[18;52Hr[18;53H[?25h",,terminal_output
153,374876,"TERMINAL",0,0,"[?25l[18;53Hu[18;55H[?25h[?25l[18;54Hn[18;56H[?25h",,terminal_output
154,374975,"TERMINAL",0,0,"[?25l[18;55H [18;56H[?25h",,terminal_output
155,377109,"TERMINAL",0,0,"[?25l[18;56H-[18;57H[?25h",,terminal_output
156,377347,"TERMINAL",0,0,"[?25l[18;57H-[18;58H[?25h",,terminal_output
157,377526,"TERMINAL",0,0,"[?25l[18;58Hn[18;60H[?25h",,terminal_output
158,377674,"TERMINAL",0,0,"[?25l[18;59Ht[18;61H[?25h",,terminal_output
159,377836,"TERMINAL",0,0,"[?25l[18;60Ha[18;63H[?25h[?25l[18;61Hs[18;63H[?25h",,terminal_output
160,378034,"TERMINAL",0,0,"[?25l[18;62Hk[18;64H[?25h[?25l[18;63Hs[18;64H[?25h",,terminal_output
161,378294,"TERMINAL",0,0,"[?25l[18;64H=[18;66H[?25h",,terminal_output
162,378467,"TERMINAL",0,0,"[?25l[18;65H8[18;66H[?25h",,terminal_output
163,378714,"TERMINAL",0,0,"[?25l[?2004l\r[?25hsrun: fatal: No command given to execute.\r\n]0;franz.srambical@hai-login2:~/jafar[?2004h[[email protected]:~/jafar] $ ",,terminal_output
164,381944,"TERMINAL",0,0,"[?25lb[2ma[22m[20;53H[?25h",,terminal_output
165,381996,"TERMINAL",0,0,"[?25l[20;52Ha[20;54H[?25h",,terminal_output
166,382073,"TERMINAL",0,0,"[?25l[20;53Hs[20;55H[?25h",,terminal_output
167,382134,"TERMINAL",0,0,"[?25l[20;54Hh[20;55H[?25h",,terminal_output
168,382242,"TERMINAL",0,0,"[?25l[20;55H [20;57H[?25h",,terminal_output
169,382287,"TERMINAL",0,0,"[?25l[20;56He[20;57H[?25h",,terminal_output
170,382501,"TERMINAL",0,0,"[?25l[20;57Hx[20;59H[?25h",,terminal_output
171,382564,"TERMINAL",0,0,"[?25l[20;58Hp[20;60H[?25h",,terminal_output
172,382650,"TERMINAL",0,0,"[?25l[20;59He[20;61H[?25h",,terminal_output
173,382713,"TERMINAL",0,0,"[?25l[20;60Hr[20;63H[?25h",,terminal_output
174,382783,"TERMINAL",0,0,"[?25l[20;61Hi[20;63H[?25h[?25l[20;62Hm[20;63H[?25h",,terminal_output
175,382949,"TERMINAL",0,0,"ents/",,terminal_output
176,383082,"TERMINAL",0,0,"[?25l[20;68Ht[20;69H[?25h",,terminal_output
177,383249,"TERMINAL",0,0,"[?25l[20;69Ho[20;71H[?25h[?25l[20;70Hk[20;71H[?25h",,terminal_output
178,383400,"TERMINAL",0,0,"enizer_",,terminal_output
179,384268,"TERMINAL",0,0,"[?25l[20;78Hg[20;79H[?25h",,terminal_output
180,384457,"TERMINAL",0,0,"[?25l[20;79Hr[20;80H[?25h",,terminal_output
181,384620,"TERMINAL",0,0,"ain_checkpointing.sh ",,terminal_output
182,384835,"TERMINAL",0,0,"[?25l[?2004l\r[?25hsrun: warning: can't honor --ntasks-per-node set to 4 which doesn't match the requested tasks 8 with the maximum number of requested nodes 1. Ignoring --ntasks-per-node.\r\nsrun: error: Unable to create step for job 13345: More processors requested than permitted\r\n]0;franz.srambical@hai-login2:~/jafar[?2004h[[email protected]:~/jafar] $ ",,terminal_output
183,402218,"TERMINAL",0,0,"e",,terminal_output
184,402417,"TERMINAL",0,0,"[?25l[25;52Hx[25;54H[?25h",,terminal_output
185,402489,"TERMINAL",0,0,"[?25l[25;53Hi[25;55H[?25h",,terminal_output
186,402572,"TERMINAL",0,0,"[?25l[25;54Ht[25;55H[?25h",,terminal_output
187,402688,"TERMINAL",0,0,"[?25l[?2004l\rexit\r\n[?25hsrun: error: hai002: task 0: Exited with exit code 1\r\nsalloc: Relinquishing job allocation 13345\r\n]0;franz.srambical@hai-login2:~/jafar]633;D;1",,terminal_output
188,410925,"TERMINAL",0,0,"salloc --gpus=8 --cpus-per-task=4 --mem=100G",,terminal_command
189,411026,"TERMINAL",0,0,"\r\n\r\r\n[?2004l\r]633;E;salloc --gpus=8 --cpus-per-task=4 --mem=100G;55d01655-a60b-4820-915a-c4f2af7e0b4c]633;Csalloc: Granted job allocation 13346\r\nsalloc: Nodes hai002 are ready for job\r\n",,terminal_output
190,411346,"TERMINAL",0,0,"Running inside SLURM, Job ID 13346.\r\n",,terminal_output
191,411439,"TERMINAL",0,0,"]0;franz.srambical@hai-login2:~/jafar[?2004h[[email protected]:~/jafar] $ ",,terminal_output
192,412564,"TERMINAL",0,0,"e",,terminal_output
193,412716,"TERMINAL",0,0,"[?25l[34;52Hx[34;54H[?25h",,terminal_output
194,412797,"TERMINAL",0,0,"[?25l[34;53Hi[34;54H[?25h",,terminal_output
195,412896,"TERMINAL",0,0,"[?25l[34;54Ht[34;55H[?25h",,terminal_output
196,412956,"TERMINAL",0,0,"[?25l[?2004l\rexit\r\nsalloc: Relinquishing job allocation 13346\r\n]0;franz.srambical@hai-login2:~/jafar]633;D;0[?25h",,terminal_output
197,415898,"TERMINAL",0,0,"salloc --gpus=8 --cpus-per-task=5 --mem=100G",,terminal_command
198,415959,"TERMINAL",0,0,"[?25l\r\r\n[?2004l\r]633;E;salloc --gpus=8 --cpus-per-task=5 --mem=100G;55d01655-a60b-4820-915a-c4f2af7e0b4c]633;C[?25hsalloc: Pending job allocation 13347\r\nsalloc: job 13347 queued and waiting for resources\r\n",,terminal_output
199,419258,"TERMINAL",0,0,"",,terminal_focus
200,422599,"TERMINAL",0,0,"squeue",,terminal_command
201,422607,"TERMINAL",0,0,"\r]633;A[franz.srambical@[31;47;1mhai-login2.haicore.berlin[m:~/jafar] $ ]633;Bsqueue\r\n[?2004l\r]633;E;squeue;6def2f5c-c4a9-4ad4-8a90-ea797733dc74]633;C JOBID USER PARTITION NODES CPUS ST SUBMIT_TIME START_TIME TIME TIME_LIMIT NODELIST(REASON)\r\n",,terminal_output
202,426751,"TERMINAL",0,0,"salloc",,terminal_focus
203,479574,"TERMINAL",0,0,"^Csalloc: Job allocation 13347 has been revoked.\r\nsalloc: Job aborted due to signal\r\n]0;franz.srambical@hai-login2:~/jafar",,terminal_output
204,481639,"TERMINAL",0,0,"salloc --gpus=8 --cpus-per-task=5",,terminal_command
205,481686,"TERMINAL",0,0,"[?25l[?2004l\r]633;E;salloc --gpus=8 --cpus-per-task=5;55d01655-a60b-4820-915a-c4f2af7e0b4c]633;C[?25hsalloc: Pending job allocation 13348\r\nsalloc: job 13348 queued and waiting for resources\r\n",,terminal_output
206,483620,"TERMINAL",0,0,"bash",,terminal_focus
207,487752,"TERMINAL",0,0,"salloc",,terminal_focus
208,489649,"TERMINAL",0,0,"bash",,terminal_focus
209,491295,"TERMINAL",0,0,"squeue",,terminal_command
210,491312,"TERMINAL",0,0,"\r\n[?2004l\r]633;E;squeue;6def2f5c-c4a9-4ad4-8a90-ea797733dc74]633;C JOBID USER PARTITION NODES CPUS ST SUBMIT_TIME START_TIME TIME TIME_LIMIT NODELIST(REASON)\r\n 13348 franz.sram interacti 1 5 PD 2025-07-11T12:26:30 N/A 0:00 1-00:00:00 (QOSMaxCpuPerUserLimit)\r\n",,terminal_output
211,494054,"TERMINAL",0,0,"salloc",,terminal_focus
212,539208,"TERMINAL",0,0,"salloc --gpus=8 --cpus-per-task=4 --mem=100G",,terminal_command
213,539279,"TERMINAL",0,0,"\r\n[?2004l\r]633;E;salloc --gpus=8 --cpus-per-task=4 --mem=100G;55d01655-a60b-4820-915a-c4f2af7e0b4c]633;Csalloc: Granted job allocation 13349\r\nsalloc: Nodes hai002 are ready for job\r\n",,terminal_output
214,539594,"TERMINAL",0,0,"Running inside SLURM, Job ID 13349.\r\n]0;franz.srambical@hai-login2:~/jafar[?2004h[[email protected]:~/jafar] $ ",,terminal_output
215,541451,"TERMINAL",0,0,"[?25lb[2ma[22m[58;53H[?25h",,terminal_output
216,541656,"TERMINAL",0,0,"[?25l[58;52Ha[58;55H[?25h[?25l[58;53Hs[58;55H[?25h",,terminal_output
217,541684,"TERMINAL",0,0,"[?25l[58;54Hh[58;56H[?25h",,terminal_output
218,541864,"TERMINAL",0,0,"[?25l[58;55H [58;57H[?25h[?25l[58;56He[58;57H[?25h",,terminal_output
219,542070,"TERMINAL",0,0,"[?25l[58;57Hx[58;58Hp[58;59H[?25h",,terminal_output
220,542292,"TERMINAL",0,0,"[?25l[58;59He[58;63H[?25h[?25l[58;60Hr[58;63H[?25h",,terminal_output
221,542392,"TERMINAL",0,0,"[?25l[58;61Hi[58;62Hm[58;63H[?25h",,terminal_output
222,542544,"TERMINAL",0,0,"ents/",,terminal_output
223,542712,"TERMINAL",0,0,"[?25l[58;69H[X[58;68H[X[0mt[?25hok",,terminal_output
224,542904,"TERMINAL",0,0,"[?25l[58;71H[X[0menizer_[?25h",,terminal_output
225,543085,"TERMINAL",0,0,"g",,terminal_output
226,543284,"TERMINAL",0,0,"rain_checkpointing.sh ",,terminal_output
227,543784,"TERMINAL",0,0,"\r\n[?2004l\rsrun: error: Unable to create step for job 13349: More processors requested than permitted\r\n]0;franz.srambical@hai-login2:~/jafar[?2004h[[email protected]:~/jafar] $ ",,terminal_output
228,850377,"TERMINAL",0,0,"[?25le[2mx[22m[2mi[22m[58;54H[?25h[?25l[58;52Hx[58;54H[?25h",,terminal_output
229,850735,"TERMINAL",0,0,"[?25l[58;53Hi[58;55H[?25h[?25l[58;54Ht[58;55H[?25h\r\n[?2004l\rexit\r\n",,terminal_output
230,851043,"TERMINAL",0,0,"srun: error: hai002: task 0: Exited with exit code 1\r\nsalloc: Relinquishing job allocation 13349\r\nsalloc: Job allocation 13349 has been revoked.\r\n",,terminal_output
231,853283,"TERMINAL",0,0,"squeue",,terminal_command
232,853285,"TERMINAL",0,0,"\r\n[?2004l\r]633;E;squeue;55d01655-a60b-4820-915a-c4f2af7e0b4c]633;C",,terminal_output
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-41b294b4-b89c-4c1d-8a02-14afc9168dc41753085667665-2025_07_21-10.15.04.628/source.csv
ADDED
The diff for this file is too large to render.
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-46a4cc9d-ac37-44ff-ae8d-547db76d96f31752072213286-2025_07_09-16.43.57.848/source.csv
ADDED
The diff for this file is too large to render.
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-4719c5f9-1b15-4792-8afd-690761108bda1751617825355-2025_07_04-10.31.22.581/source.csv
ADDED
The diff for this file is too large to render.
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-55c207a6-ee6d-464d-8f77-e1220855a4f41754396844541-2025_08_05-14.27.33.594/source.csv
ADDED
@@ -0,0 +1,81 @@
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
2,631,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"2:27:33 PM [info] Activating crowd-code\n2:27:33 PM [info] Recording started\n2:27:33 PM [info] Initializing git provider using file system watchers...\n2:27:33 PM [info] Git repository found\n2:27:33 PM [info] Git provider initialized successfully\n2:27:33 PM [info] Initial git state: [object Object]\n",Log,tab
3,4851,"train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\nfrom typing import cast\n\nimport einops\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\nimport flax.nnx as nnx\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n warmup_steps: int = 5000\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n # Tokenizer\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n use_gt_actions: bool = False\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(\n model: Genie, inputs: dict\n) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n """"""Compute masked dynamics loss""""""\n gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n model.train()\n outputs = model(inputs, training=True)\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n gt = gt.clip(0, 1).reshape(-1, *gt.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = jnp.asarray(pix.psnr(gt, recon)).mean()\n ssim = jnp.asarray(pix.ssim(gt, recon)).mean()\n _, index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]), size=args.num_patch_latents, fill_value=0\n )\n if args.use_gt_actions:\n _, index_counts_lam = 
jnp.unique_counts(\n jnp.ravel(outputs[""lam_indices""]), size=args.num_actions, fill_value=0\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n else:\n codebook_usage_lam = None\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\[email protected]\ndef train_step(\n model: Genie, optimizer: nnx.Optimizer, inputs: dict\n) -> tuple[jax.Array, jax.Array, dict]:\n """"""Update state and compute metrics""""""\n\n def loss_fn(model: Genie) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n return dynamics_loss_fn(model, inputs)\n\n (loss, (recon, metrics)), grads = nnx.value_and_grad(loss_fn, has_aux=True)(model)\n optimizer.update(grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.key(args.seed)\n\n # --- Initialize model ---\n rng, _rng = jax.random.split(rng)\n rngs = nnx.Rngs(_rng)\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_actions=args.num_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n use_gt_actions=args.use_gt_actions,\n rngs=rngs,\n )\n\n _, params, _ = nnx.split(genie, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n 
args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n optimizer = nnx.Optimizer(genie, tx)\n del genie\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n\n model_state = nnx.state(optimizer.model)\n model_sharded_state = jax.lax.with_sharding_constraint(\n model_state, replicated_sharding\n )\n nnx.update(optimizer.model, model_sharded_state)\n optimizer_state = nnx.state(optimizer, nnx.optimizer.OptState)\n optimizer_sharded_state = jax.lax.with_sharding_constraint(\n optimizer_state, replicated_sharding\n )\n nnx.update(optimizer, optimizer_sharded_state)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n image_shape = (args.image_height, args.image_width, args.image_channels)\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n use_gt_actions=args.use_gt_actions,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator), # type: ignore\n ),\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n optimizer = 
restore_genie_components(optimizer, replicated_sharding, rng, args)\n # NOTE: We have to remove the (unused) tokenizer vq dropout due flax.nnx lazily initializing modules.\n # Specifically, the first dynamics model checkpoint will contain the vq dropout module,\n # but the first full restore will fail due to nnx not initializing the module when\n # dropout is set to 0.0.\n del optimizer.model.tokenizer.vq.drop\n\n # --- TRAIN LOOP ---\n dataloader = (\n jax.make_array_from_process_local_data(videos_sharding, elem)\n for elem in grain_iterator\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos, actions in dataloader:\n # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, actions=actions, mask_rng=_rng_mask)\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n optimizer_state = nnx.state(optimizer)\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n grain_iterator # type: ignore\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab
4,5815,"train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\nfrom typing import cast\n\nimport einops\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\nimport flax.nnx as nnx\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n warmup_steps: int = 5000\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n # Tokenizer\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n use_gt_actions: bool = False\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(\n model: Genie, inputs: dict\n) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n """"""Compute masked dynamics loss""""""\n gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n model.train()\n outputs = model(inputs, training=True)\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n gt = gt.clip(0, 1).reshape(-1, *gt.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = jnp.asarray(pix.psnr(gt, recon)).mean()\n ssim = jnp.asarray(pix.ssim(gt, recon)).mean()\n _, index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]), size=args.num_patch_latents, fill_value=0\n )\n if args.use_gt_actions:\n _, index_counts_lam = 
jnp.unique_counts(\n jnp.ravel(outputs[""lam_indices""]), size=args.num_actions, fill_value=0\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n else:\n codebook_usage_lam = None\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\[email protected]\ndef train_step(\n model: Genie, optimizer: nnx.Optimizer, inputs: dict\n) -> tuple[jax.Array, jax.Array, dict]:\n """"""Update state and compute metrics""""""\n\n def loss_fn(model: Genie) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n return dynamics_loss_fn(model, inputs)\n\n (loss, (recon, metrics)), grads = nnx.value_and_grad(loss_fn, has_aux=True)(model)\n optimizer.update(grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.key(args.seed)\n\n # --- Initialize model ---\n rng, _rng = jax.random.split(rng)\n rngs = nnx.Rngs(_rng)\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_actions=args.num_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n use_gt_actions=args.use_gt_actions,\n rngs=rngs,\n )\n\n _, params, _ = nnx.split(genie, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n 
args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n optimizer = nnx.Optimizer(genie, tx)\n del genie\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n\n model_state = nnx.state(optimizer.model)\n model_sharded_state = jax.lax.with_sharding_constraint(\n model_state, replicated_sharding\n )\n nnx.update(optimizer.model, model_sharded_state)\n optimizer_state = nnx.state(optimizer, nnx.optimizer.OptState)\n optimizer_sharded_state = jax.lax.with_sharding_constraint(\n optimizer_state, replicated_sharding\n )\n nnx.update(optimizer, optimizer_sharded_state)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n image_shape = (args.image_height, args.image_width, args.image_channels)\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n use_gt_actions=args.use_gt_actions,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator), # type: ignore\n ),\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n optimizer = 
restore_genie_components(optimizer, replicated_sharding, rng, args)\n # NOTE: We have to remove the (unused) tokenizer vq dropout due to flax.nnx lazily initializing modules.\n # Specifically, the first dynamics model checkpoint will contain the vq dropout module,\n # but the first full restore will fail due to nnx not initializing the module when\n # dropout is set to 0.0.\n del optimizer.model.tokenizer.vq.drop\n\n # --- TRAIN LOOP ---\n dataloader = (\n jax.make_array_from_process_local_data(videos_sharding, elem)\n for elem in grain_iterator\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos, actions in dataloader:\n # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, actions=actions, mask_rng=_rng_mask)\n loss, recon, metrics = train_step(optimizer.model, optimizer, inputs)\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n optimizer_state = nnx.state(optimizer)\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n grain_iterator # type: ignore\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab
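For readers skimming the recorded train_dynamics.py above: the heart of dynamics_loss_fn is a cross-entropy averaged only over the masked token positions. A minimal standalone sketch of that computation (the toy shapes are hypothetical; the optax call is the one used in the recording):

import jax
import jax.numpy as jnp
import optax

k1, k2, k3 = jax.random.split(jax.random.key(0), 3)
B, T, N, V = 2, 4, 6, 1024                        # batch, frames, patches, vocab
logits = jax.random.normal(k1, (B, T, N, V))      # stand-in for token_logits
tokens = jax.random.randint(k2, (B, T, N), 0, V)  # stand-in for video_tokens
mask = jax.random.bernoulli(k3, 0.5, (B, T, N))   # True where a token was masked

ce = optax.softmax_cross_entropy_with_integer_labels(logits, tokens)
ce_loss = (mask * ce).sum() / mask.sum()          # average over masked positions only
acc = (mask * (logits.argmax(-1) == tokens)).sum() / mask.sum()

Dividing by mask.sum() rather than the full token count keeps the loss scale independent of the sampled masking ratio.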
5,39197,"train_dynamics.py",1557,10990," num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_type: str = ""maskgit"" # supported options: maskgit, causal\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(\n model: Genie, inputs: dict\n) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n """"""Compute masked dynamics loss""""""\n gt = jnp.asarray(inputs[""videos""], dtype=jnp.float32) / 255.0\n inputs[""videos""] = gt.astype(args.dtype)\n model.train()\n outputs = model(inputs, training=True)\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n gt = gt.clip(0, 1).reshape(-1, *gt.shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = jnp.asarray(pix.psnr(gt, recon)).mean()\n ssim = jnp.asarray(pix.ssim(gt, recon)).mean()\n _, index_counts_lam = jnp.unique_counts(\n jnp.ravel(outputs[""lam_indices""]), size=args.num_latent_actions, fill_value=0\n )\n _, index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]), size=args.num_patch_latents, fill_value=0\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\[email protected]\ndef train_step(\n model: Genie, optimizer: nnx.Optimizer, inputs: dict\n) -> tuple[jax.Array, jax.Array, dict]:\n """"""Update state and compute metrics""""""\n\n def loss_fn(model: Genie) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n return dynamics_loss_fn(model, inputs)\n\n (loss, (recon, metrics)), grads = nnx.value_and_grad(loss_fn, has_aux=True)(model)\n optimizer.update(grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global 
batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.key(args.seed)\n\n # --- Initialize model ---\n rng, _rng = jax.random.split(rng)\n rngs = nnx.Rngs(_rng)\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_type=args.dyna_type,\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n decode=False,\n rngs=rngs,\n )\n\n _, params, _ = nnx.split(genie, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n optimizer = nnx.Optimizer(genie, tx)\n del genie\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n\n model_state = nnx.state(optimizer.model)\n model_sharded_state = jax.lax.with_sharding_constraint(\n model_state, replicated_sharding\n )\n nnx.update(optimizer.model, model_sharded_state)\n optimizer_state = nnx.state(optimizer, nnx.optimizer.OptState)\n optimizer_sharded_state = jax.lax.with_sharding_constraint(\n optimizer_state, replicated_sharding\n )\n nnx.update(optimizer, optimizer_sharded_state)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n 
handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n image_shape = (args.image_height, args.image_width, args.image_channels)\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator), # type: ignore\n ),\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n optimizer = restore_genie_components(optimizer, replicated_sharding, rng, args)\n # NOTE: We have to remove the (unused) tokenizer vq dropout due flax.nnx lazily initializing modules.\n # Specifically, the first dynamics model checkpoint will contain the vq dropout module,\n # but the first full restore will fail due to nnx not initializing the module when\n # dropout is set to 0.0.\n del optimizer.model.tokenizer.vq.drop\n\n # --- TRAIN LOOP ---\n dataloader = (\n jax.make_array_from_process_local_data(videos_sharding, elem)\n for elem in grain_iterator\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, mask_rng=_rng_mask)\n",python,content
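The recording never shows utils/lr_utils.get_lr_schedule itself, so the following warmup-stable-decay ("wsd") schedule is only a plausible reconstruction of what the lr_schedule="wsd" option composes, built from standard optax schedule combinators:

import optax

def wsd_schedule(init_lr, max_lr, decay_end, num_steps, warmup_steps, wsd_decay_steps):
    # Linear warmup, long constant ("stable") phase, short linear decay at the end.
    warmup = optax.linear_schedule(init_lr, max_lr, warmup_steps)
    stable = optax.constant_schedule(max_lr)
    decay = optax.linear_schedule(max_lr, decay_end, wsd_decay_steps)
    return optax.join_schedules(
        schedules=[warmup, stable, decay],
        boundaries=[warmup_steps, num_steps - wsd_decay_steps],
    )

# Matches the defaults above: warm up to 3e-5 over 5k steps, decay over the last 10k.
lr = wsd_schedule(0.0, 3e-5, 0.0, 200_000, 5_000, 10_000)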
6,40187,"train_dynamics.py",0,0,"Switched from branch 'gt-action-prepending-support-maskgit' to 'causal-spatiotemporal-kv-cache'",python,git_branch_checkout
7,47544,"train_dynamics.py",10520,0,"",python,selection_keyboard
8,50404,"utils/nn.py",0,0,"import math\nfrom typing import Tuple, Callable, List\n\nfrom flax import nnx\nimport jax\nimport jax.numpy as jnp\nimport einops\n\n\nclass PositionalEncoding(nnx.Module):\n """"""https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/JAX/tutorial6/Transformers_and_MHAttention.html""""""\n\n def __init__(self, d_model: int, max_len: int = 5000):\n self.d_model = d_model\n self.max_len = max_len\n\n pe = jnp.zeros((self.max_len, self.d_model))\n position = jnp.arange(0, self.max_len, dtype=jnp.float32)[:, None]\n div_term = jnp.exp(\n jnp.arange(0, self.d_model, 2) * (-math.log(10000.0) / self.d_model)\n )\n pe = pe.at[:, 0::2].set(jnp.sin(position * div_term))\n pe = pe.at[:, 1::2].set(jnp.cos(position * div_term))\n self.pe = nnx.Variable(pe)\n\n def __call__(self, x: jax.Array) -> jax.Array:\n x = x + self.pe[: x.shape[2]]\n return x\n\n\nclass STBlock(nnx.Module):\n def __init__(\n self,\n dim: int,\n ffn_dim: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n ):\n self.dim = dim\n self.ffn_dim = ffn_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n\n self.spatial_pos_enc = PositionalEncoding(self.dim)\n self.spatial_norm = nnx.LayerNorm(\n num_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.spatial_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.dim,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=False\n ),\n rngs=rngs,\n decode=False,\n )\n\n self.temporal_pos_enc = PositionalEncoding(self.dim)\n self.temporal_norm = nnx.LayerNorm(\n num_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.temporal_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.dim,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=True\n ),\n rngs=rngs,\n decode=False,\n )\n\n self.ffn_norm = nnx.LayerNorm(\n num_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.ffn_dense1 = nnx.Linear(\n in_features=self.dim,\n out_features=self.ffn_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.ffn_dense2 = nnx.Linear(\n in_features=self.ffn_dim,\n out_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n @nnx.remat\n def __call__(self, x_BTNM: jax.Array) -> jax.Array:\n # --- Spatial attention ---\n z_BTNM = self.spatial_pos_enc(x_BTNM)\n z_BTNM = self.spatial_norm(z_BTNM)\n z_BTNM = self.spatial_attention(z_BTNM)\n x_BTNM = x_BTNM + z_BTNM\n\n # --- Temporal attention ---\n x_BNTM = x_BTNM.swapaxes(1, 2)\n z_BNTM = self.temporal_pos_enc(x_BNTM)\n z_BNTM = self.temporal_norm(z_BNTM)\n z_BNTM = self.temporal_attention(z_BNTM)\n x_BNTM = x_BNTM + z_BNTM\n x_BTNM = x_BNTM.swapaxes(1, 2)\n\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n\n return x_BTNM\n\n\nclass STTransformer(nnx.Module):\n """"""\n Dimension keys:\n 
B: batch size\n T: number of frames\n N: number of patches per frame\n I: number of input features\n M: model dimension\n D: FFN dimension\n O: number of output features\n """"""\n def __init__(\n self,\n input_dim: int,\n model_dim: int,\n ffn_dim: int,\n out_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n ):\n self.input_dim = input_dim\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.out_dim = out_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n\n self.input_norm1 = nnx.LayerNorm(\n num_features=self.input_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.input_dense = nnx.Linear(\n in_features=self.input_dim,\n out_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.input_norm2 = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n self.blocks = []\n for _ in range(self.num_blocks):\n self.blocks.append(\n STBlock(\n dim=self.model_dim,\n ffn_dim=self.ffn_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n )\n\n self.output_dense = nnx.Linear(\n in_features=self.model_dim,\n out_features=self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(self, x_BTNI: jax.Array) -> jax.Array:\n x_BTNI = self.input_norm1(x_BTNI)\n x_BTNM = self.input_dense(x_BTNI)\n x_BTNM = self.input_norm2(x_BTNM)\n\n for block in self.blocks:\n x_BTNM = block(x_BTNM)\n\n x_BTNO = self.output_dense(x_BTNM)\n return x_BTNO\n\nclass TransformerBlock(nnx.Module):\n def __init__(\n self,\n model_dim: int,\n ffn_dim: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n ):\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.decode = decode\n\n self.temporal_pos_enc = PositionalEncoding(self.model_dim)\n self.spatial_pos_enc = PositionalEncoding(self.model_dim)\n self.temporal_norm = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.spatial_norm = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.ffn_norm = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.temporal_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.model_dim,\n qkv_features=self.model_dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=True\n ),\n rngs=rngs,\n decode=self.decode,\n )\n self.spatial_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.model_dim,\n qkv_features=self.model_dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n 
self.use_flash_attention, is_causal=True\n ),\n rngs=rngs,\n decode=self.decode,\n )\n self.ffn_dense1 = nnx.Linear(\n in_features=self.model_dim,\n out_features=self.ffn_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.ffn_dense2 = nnx.Linear(\n in_features=self.ffn_dim,\n out_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n @nnx.remat\n def __call__(self, x_BTNM: jax.Array, pos_index: Tuple[jax.Array, jax.Array] | None = None) -> jax.Array:\n # --- Spatial attention ---\n B, T, N, M = x_BTNM.shape\n z_FNM = einops.rearrange(x_BTNM, ""b t n m -> (b t) n m"")\n z_FNM = self.spatial_norm(z_FNM)\n if self.decode:\n assert pos_index is not None\n z_FM = z_FNM[:, pos_index[1]]\n z_F1M = jnp.reshape(z_FM, (B * T, 1, M))\n z_F1M = self.spatial_attention(z_F1M)\n z_FM = jnp.reshape(z_F1M, (B * T, M))\n z_FNM = z_FNM.at[:, pos_index[1], :].set(z_FM)\n else:\n z_FNM = self.spatial_attention(z_FNM)\n z_BTNM = einops.rearrange(z_FNM, ""(b t) n m -> b t n m"", t=T)\n x_BTNM = x_BTNM + z_BTNM\n # --- Temporal attention ---\n z_PTM = einops.rearrange(x_BTNM, ""b t n m -> (b n) t m"")\n z_PTM = self.temporal_norm(z_PTM)\n if self.decode:\n assert pos_index is not None\n z_PM = z_PTM[:, pos_index[0]]\n z_P1M = jnp.reshape(z_PM, (B * N, 1, M))\n z_P1M = self.temporal_attention(z_P1M)\n z_PM = jnp.reshape(z_P1M, (B * N, M))\n z_PTM = z_PTM.at[:, pos_index[0], :].set(z_PM)\n else:\n z_PTM = self.temporal_attention(z_PTM)\n z_BTNM = einops.rearrange(z_PTM, ""(b n) t m -> b t n m"", n=N)\n x_BTNM = x_BTNM + z_BTNM\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n\n return x_BTNM\n\nclass Transformer(nnx.Module):\n """"""\n Dimension keys:\n B: batch size\n T: number of frames\n N: number of patches per frame\n I: number of input features\n M: model dimension\n D: FFN dimension\n O: number of output features\n F: number of frames in batch\n P: number of patch positions in batch\n """"""\n def __init__(\n self,\n input_dim: int,\n model_dim: int,\n ffn_dim: int,\n out_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n ):\n self.input_dim = input_dim\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.out_dim = out_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n\n self.pos_enc = PositionalEncoding(self.model_dim)\n self.input_norm1 = nnx.LayerNorm(\n num_features=self.input_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.input_dense = nnx.Linear(\n in_features=self.input_dim,\n out_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.input_norm2 = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n self.blocks: List[TransformerBlock] = []\n for _ in range(self.num_blocks):\n self.blocks.append(\n TransformerBlock(\n model_dim=self.model_dim,\n ffn_dim=self.ffn_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n decode=decode,\n rngs=rngs,\n )\n )\n self.output_dense = 
nnx.Linear(\n in_features=self.model_dim,\n out_features=self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(self, x_BTNI: jax.Array, pos_index: Tuple[jax.Array, jax.Array] | None = None) -> jax.Array:\n x_BTNI = self.input_norm1(x_BTNI)\n x_BTNM = self.input_dense(x_BTNI)\n x_BTNM = self.input_norm2(x_BTNM)\n x_BTNM = self.pos_enc(x_BTNM)\n\n for block in self.blocks:\n x_BTNM = block(x_BTNM, pos_index)\n\n x_BTNV = self.output_dense(x_BTNM)\n return x_BTNV\n\ndef normalize(x: jax.Array) -> jax.Array:\n return x / (jnp.linalg.norm(x, ord=2, axis=-1, keepdims=True) + 1e-8)\n\n\nclass VectorQuantizer(nnx.Module):\n """"""\n Dimension keys:\n D: B * T * N\n K: number of latents\n L: latent dimension\n """"""\n def __init__(\n self, latent_dim: int, num_latents: int, dropout: float, rngs: nnx.Rngs\n ):\n self.latent_dim = latent_dim\n self.num_latents = num_latents\n self.dropout = dropout\n\n self.codebook = nnx.Param(\n normalize(\n nnx.initializers.lecun_uniform()(\n rngs.params(), (self.num_latents, self.latent_dim)\n )\n )\n )\n self.drop = nnx.Dropout(self.dropout, rngs=rngs)\n\n def __call__(\n self, x_DL: jax.Array, training: bool\n ) -> Tuple[jax.Array, jax.Array, jax.Array, jax.Array]:\n # --- Compute distances ---\n x_DL = normalize(x_DL)\n normalized_codebook_KL = normalize(self.codebook.value)\n distance_DK = -jnp.matmul(x_DL, normalized_codebook_KL.T)\n if training:\n distance_DK = self.drop(distance_DK)\n\n # --- Get indices and embeddings ---\n indices_D = jnp.argmin(distance_DK, axis=-1)\n z_DL = self.codebook[indices_D]\n\n # --- Straight through estimator ---\n z_q_DL = x_DL + jax.lax.stop_gradient(z_DL - x_DL)\n return z_q_DL, z_DL, x_DL, indices_D\n\n def get_codes(self, indices_E: jax.Array) -> jax.Array:\n return self.codebook[indices_E]\n\n\ndef _create_flash_attention_fn(use_flash_attention: bool, is_causal: bool) -> Callable:\n """"""\n Create an attention function that uses flash attention if enabled.\n\n flax.nnx.MultiHeadAttention provides tensors with shape (batch..., length, num_heads, head_dim),\n but jax.nn.dot_product_attention expects (batch, length, num_heads, head_dim). We reshape to\n ensure compatibility. cuDNN's flash attention additionally requires a sequence length that\n is a multiple of 4. We pad the sequence length to the nearest multiple of 4 and mask\n accordingly. Note that cuDNN requires the mask to be broadcast before calling the attention\n function due to strict shape checking.\n """"""\n\n def attention_fn(query_BTHD, key_BSHD, value_BSHD, bias=None, mask_B111=None, **kwargs):\n implementation = ""cudnn"" if use_flash_attention else None\n\n def _merge_batch_dims(x):\n return einops.rearrange(x, ""... l h k -> (...) 
l h k"")\n\n def _pad(x, pad_size):\n return jnp.pad(x, ((0, 0), (0, pad_size), (0, 0), (0, 0)))\n\n original_shape = query_BTHD.shape\n T = query_BTHD.shape[-3]\n S = key_BSHD.shape[-3]\n\n # Pad to nearest multiple of 4\n Q = ((T + 3) // 4) * 4\n pad_size_Q = Q - T\n K = ((S + 3) // 4) * 4\n pad_size_K = K - S\n\n query_BQHD = _pad(_merge_batch_dims(query_BTHD), pad_size_Q)\n key_BKHD = _pad(_merge_batch_dims(key_BSHD), pad_size_K)\n value_BKHD = _pad(_merge_batch_dims(value_BSHD), pad_size_K)\n B = query_BQHD.shape[0]\n\n attention_mask = jnp.ones((Q, K), dtype=jnp.bool_)\n attention_mask = attention_mask.at[Q:, :].set(False)\n attention_mask = attention_mask.at[:, K:].set(False)\n\n # Handle causal mask for cached decoder self-attention (from nnx.MultiHeadAttention)\n if mask_B111 is not None:\n # FIXME (f.srambical): Why do we need this?\n mask_B111 = _merge_batch_dims(mask_B111)\n # We need to broadcast T and S dimensions to target_seq_len since cudnn attention strictly checks the mask shape\n # https://github.com/jax-ml/jax/issues/28974\n # https://github.com/jax-ml/jax/blob/08c7677393672ccb85c10f1ed0bd506905c3c994/jax/_src/cudnn/fused_attention_stablehlo.py#L1830\n # https://github.com/jax-ml/jax/blob/08c7677393672ccb85c10f1ed0bd506905c3c994/jax/_src/cudnn/fused_attention_stablehlo.py#L337\n mask_B1TS = einops.repeat(mask_B111, ""... 1 1 -> ... t s"", t=Q, s=K)\n mask_B1TS = mask_B111.astype(jnp.bool)\n else:\n mask_11TS = attention_mask[jnp.newaxis, jnp.newaxis, :, :]\n mask_B1TS = jnp.broadcast_to(mask_11TS, (B, 1, Q, K))\n\n bias_4d = _merge_batch_dims(bias) if bias is not None else None\n\n # NOTE: jax.nn.dot_product_attention does not support dropout\n output_4d = jax.nn.dot_product_attention(\n query=query_BQHD,\n key=key_BKHD,\n value=value_BKHD,\n bias=bias_4d,\n mask=mask_B1TS,\n implementation=implementation,\n is_causal=is_causal,\n )\n return output_4d[..., :T, :, :].reshape(original_shape)\n\n return attention_fn\n",python,tab
9,51623,"utils/nn.py",4285,0,"",python,selection_command
10,52308,"utils/nn.py",6901,0,"",python,selection_command
11,177589,"utils/nn.py",7044,0,"",python,selection_command
12,179216,"utils/nn.py",7320,0,"",python,selection_command
13,179299,"utils/nn.py",7330,0,"",python,selection_command
14,185317,"utils/nn.py",7508,0,"",python,selection_command
15,193669,"utils/nn.py",11253,0,"",python,selection_command
16,196652,"utils/nn.py",12324,0,"",python,selection_command
17,198509,"utils/nn.py",12311,58,"",python,content
18,198539,"utils/nn.py",12319,0,"",python,selection_command
19,199494,"utils/nn.py",12910,0,"",python,selection_command
20,200159,"utils/nn.py",12912,0,"",python,selection_command
21,200399,"utils/nn.py",12921,0,"",python,selection_command
22,200449,"utils/nn.py",12970,0,"",python,selection_command
23,200463,"utils/nn.py",13011,0,"",python,selection_command
24,200504,"utils/nn.py",13043,0,"",python,selection_command
25,200533,"utils/nn.py",13077,0,"",python,selection_command
26,200553,"utils/nn.py",13123,0,"",python,selection_command
27,200598,"utils/nn.py",13165,0,"",python,selection_command
28,200775,"utils/nn.py",13211,0,"",python,selection_command
29,200911,"utils/nn.py",13253,0,"",python,selection_command
30,201154,"utils/nn.py",13303,0,"",python,selection_command
31,201188,"utils/nn.py",13341,0,"",python,selection_command
32,201214,"utils/nn.py",13407,0,"",python,selection_command
33,201254,"utils/nn.py",13442,0,"",python,selection_command
34,201289,"utils/nn.py",13473,0,"",python,selection_command
35,201319,"utils/nn.py",13491,0,"",python,selection_command
36,201354,"utils/nn.py",13505,0,"",python,selection_command
37,201392,"utils/nn.py",13545,0,"",python,selection_command
38,201424,"utils/nn.py",13585,0,"",python,selection_command
39,201469,"utils/nn.py",13624,0,"",python,selection_command
40,201504,"utils/nn.py",13666,0,"",python,selection_command
41,201522,"utils/nn.py",13696,0,"",python,selection_command
42,201570,"utils/nn.py",13719,0,"",python,selection_command
43,201584,"utils/nn.py",13721,0,"",python,selection_command
44,201624,"utils/nn.py",13730,0,"",python,selection_command
45,201673,"utils/nn.py",13840,0,"",python,selection_command
46,201705,"utils/nn.py",13882,0,"",python,selection_command
47,201724,"utils/nn.py",13924,0,"",python,selection_command
48,201761,"utils/nn.py",13966,0,"",python,selection_command
49,201810,"utils/nn.py",13996,0,"",python,selection_command
50,201834,"utils/nn.py",14005,0,"",python,selection_command
51,201863,"utils/nn.py",14039,0,"",python,selection_command
52,201966,"utils/nn.py",14077,0,"",python,selection_command
53,201966,"utils/nn.py",14086,0,"",python,selection_command
54,201967,"utils/nn.py",14129,0,"",python,selection_command
55,201993,"utils/nn.py",14143,0,"",python,selection_command
56,202049,"utils/nn.py",14152,0,"",python,selection_command
57,202090,"utils/nn.py",14194,0,"",python,selection_command
58,202105,"utils/nn.py",14260,0,"",python,selection_command
59,202149,"utils/nn.py",14261,0,"",python,selection_command
60,202184,"utils/nn.py",14270,0,"",python,selection_command
61,202194,"utils/nn.py",14303,0,"",python,selection_command
62,202235,"utils/nn.py",14313,0,"",python,selection_command
63,202266,"utils/nn.py",14333,0,"",python,selection_command
64,202389,"utils/nn.py",14313,0,"",python,selection_command
65,202644,"utils/nn.py",14303,0,"",python,selection_command
66,202684,"utils/nn.py",14270,0,"",python,selection_command
67,202706,"utils/nn.py",14261,0,"",python,selection_command
68,202746,"utils/nn.py",14260,0,"",python,selection_command
69,202774,"utils/nn.py",14194,0,"",python,selection_command
70,202840,"utils/nn.py",14152,0,"",python,selection_command
71,202859,"utils/nn.py",14143,0,"",python,selection_command
72,202879,"utils/nn.py",14129,0,"",python,selection_command
73,202902,"utils/nn.py",14086,0,"",python,selection_command
74,202948,"utils/nn.py",14077,0,"",python,selection_command
75,202973,"utils/nn.py",14039,0,"",python,selection_command
76,203009,"utils/nn.py",14005,0,"",python,selection_command
77,203049,"utils/nn.py",13996,0,"",python,selection_command
78,203404,"utils/nn.py",13966,0,"",python,selection_command
79,204040,"utils/nn.py",13958,38,"",python,content
80,211356,"utils/nn.py",0,0,"",python,tab
81,211357,"utils/nn.py",12311,0,"",python,selection_command
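Stepping back from this particular session: each source.csv in the dataset is a flat event log with the header Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type. A best-effort sketch of replaying such a log into final file snapshots (the event semantics below are inferred from the rows above, not from a published spec):

import csv

def replay(csv_path: str) -> dict[str, str]:
    """Best-effort reconstruction of final file contents from one recording."""
    buffers: dict[str, str] = {}
    with open(csv_path, newline="") as f:
        for row in csv.DictReader(f):
            path, text = row["File"], row["Text"]
            if row["Type"] == "tab" and text:
                # Opening a file appears to record a full snapshot of it.
                buffers[path] = text
            elif row["Type"] == "content":
                # A range edit: replace RangeLength chars at RangeOffset with Text.
                off, n = int(row["RangeOffset"]), int(row["RangeLength"])
                buf = buffers.get(path, "")
                buffers[path] = buf[:off] + text + buf[off + n:]
            # selection_command / selection_keyboard / git_branch_checkout rows
            # carry cursor and VCS context and do not change the buffer text.
    return buffers

Note that the Text column stores newlines escaped as literal \n sequences, so a faithful replay would unescape them first.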
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-59af9530-ed37-4620-9980-6c646b3d58821751599132847-2025_07_04-05.19.59.945/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-5b1a6152-1602-4538-a4b1-6fa9507221151753212707189-2025_07_22-21.32.36.855/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-6829bcbf-f7fb-4481-92ea-521e9af7eabb1754058671446-2025_08_01-16.31.17.606/source.csv
ADDED
@@ -0,0 +1,4 @@
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
1,3,"utils/nn.py",0,0,"import math\nfrom typing import Tuple, Callable, List\n\nfrom flax import nnx\nimport jax\nimport jax.numpy as jnp\nimport einops\n\n\nclass PositionalEncoding(nnx.Module):\n """"""https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/JAX/tutorial6/Transformers_and_MHAttention.html""""""\n\n def __init__(self, d_model: int, max_len: int = 5000):\n self.d_model = d_model\n self.max_len = max_len\n\n pe = jnp.zeros((self.max_len, self.d_model))\n position = jnp.arange(0, self.max_len, dtype=jnp.float32)[:, None]\n div_term = jnp.exp(\n jnp.arange(0, self.d_model, 2) * (-math.log(10000.0) / self.d_model)\n )\n pe = pe.at[:, 0::2].set(jnp.sin(position * div_term))\n pe = pe.at[:, 1::2].set(jnp.cos(position * div_term))\n self.pe = nnx.Variable(pe)\n\n def __call__(self, x: jax.Array) -> jax.Array:\n x = x + self.pe[: x.shape[2]]\n return x\n\n\nclass STBlock(nnx.Module):\n def __init__(\n self,\n dim: int,\n ffn_dim: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n ):\n self.dim = dim\n self.ffn_dim = ffn_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n\n self.spatial_pos_enc = PositionalEncoding(self.dim)\n self.spatial_norm = nnx.LayerNorm(\n num_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.spatial_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.dim,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=False\n ),\n rngs=rngs,\n decode=False,\n )\n\n self.temporal_pos_enc = PositionalEncoding(self.dim)\n self.temporal_norm = nnx.LayerNorm(\n num_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.temporal_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.dim,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=True\n ),\n rngs=rngs,\n decode=False,\n )\n\n self.ffn_norm = nnx.LayerNorm(\n num_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.ffn_dense1 = nnx.Linear(\n in_features=self.dim,\n out_features=self.ffn_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.ffn_dense2 = nnx.Linear(\n in_features=self.ffn_dim,\n out_features=self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n @nnx.remat\n def __call__(self, x_BTNM: jax.Array) -> jax.Array:\n # --- Spatial attention ---\n z_BTNM = self.spatial_pos_enc(x_BTNM)\n z_BTNM = self.spatial_norm(z_BTNM)\n z_BTNM = self.spatial_attention(z_BTNM)\n x_BTNM = x_BTNM + z_BTNM\n\n # --- Temporal attention ---\n x_BNTM = x_BTNM.swapaxes(1, 2)\n z_BNTM = self.temporal_pos_enc(x_BNTM)\n z_BNTM = self.temporal_norm(z_BNTM)\n z_BNTM = self.temporal_attention(z_BNTM)\n x_BNTM = x_BNTM + z_BNTM\n x_BTNM = x_BNTM.swapaxes(1, 2)\n\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n\n return x_BTNM\n\n\nclass STTransformer(nnx.Module):\n """"""\n Dimension keys:\n B: 
batch size\n T: number of frames\n N: number of patches per frame\n I: number of input features\n M: model dimension\n D: FFN dimension\n O: number of output features\n """"""\n def __init__(\n self,\n input_dim: int,\n model_dim: int,\n ffn_dim: int,\n out_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n ):\n self.input_dim = input_dim\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.out_dim = out_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n\n self.input_norm1 = nnx.LayerNorm(\n num_features=self.input_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.input_dense = nnx.Linear(\n in_features=self.input_dim,\n out_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.input_norm2 = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n self.blocks = []\n for _ in range(self.num_blocks):\n self.blocks.append(\n STBlock(\n dim=self.model_dim,\n ffn_dim=self.ffn_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n )\n\n self.output_dense = nnx.Linear(\n in_features=self.model_dim,\n out_features=self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(self, x_BTNI: jax.Array) -> jax.Array:\n x_BTNI = self.input_norm1(x_BTNI)\n x_BTNM = self.input_dense(x_BTNI)\n x_BTNM = self.input_norm2(x_BTNM)\n\n for block in self.blocks:\n x_BTNM = block(x_BTNM)\n\n x_BTNO = self.output_dense(x_BTNM)\n return x_BTNO\n\nclass TransformerBlock(nnx.Module):\n def __init__(\n self,\n model_dim: int,\n ffn_dim: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n ):\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.decode = decode\n\n self.temporal_pos_enc = PositionalEncoding(self.model_dim)\n self.spatial_pos_enc = PositionalEncoding(self.model_dim)\n self.temporal_norm = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.spatial_norm = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.ffn_norm = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.temporal_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.model_dim,\n qkv_features=self.model_dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n self.use_flash_attention, is_causal=True\n ),\n rngs=rngs,\n decode=self.decode,\n )\n self.spatial_attention = nnx.MultiHeadAttention(\n num_heads=self.num_heads,\n in_features=self.model_dim,\n qkv_features=self.model_dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(\n 
self.use_flash_attention, is_causal=True\n ),\n rngs=rngs,\n decode=self.decode,\n )\n self.ffn_dense1 = nnx.Linear(\n in_features=self.model_dim,\n out_features=self.ffn_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.ffn_dense2 = nnx.Linear(\n in_features=self.ffn_dim,\n out_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n @nnx.remat\n def __call__(self, x_BTNM: jax.Array, pos_index: Tuple[jax.Array, jax.Array] | None = None) -> jax.Array:\n # --- Spatial attention ---\n B, T, N, M = x_BTNM.shape\n z_FNM = einops.rearrange(x_BTNM, ""b t n m -> (b t) n m"")\n z_FNM = self.spatial_norm(z_FNM)\n if self.decode:\n assert pos_index is not None\n z_FM = z_FNM[:, pos_index[1]]\n z_F1M = jnp.reshape(z_FM, (B * T, 1, M))\n z_F1M = self.spatial_attention(z_F1M)\n z_FM = jnp.reshape(z_F1M, (B * T, M))\n z_FNM = z_FNM.at[:, pos_index[1], :].set(z_FM)\n else:\n z_FNM = self.spatial_attention(z_FNM)\n z_BTNM = einops.rearrange(z_FNM, ""(b t) n m -> b t n m"", t=T)\n x_BTNM = x_BTNM + z_BTNM\n # --- Temporal attention ---\n z_PTM = einops.rearrange(x_BTNM, ""b t n m -> (b n) t m"")\n z_PTM = self.temporal_norm(z_PTM)\n if self.decode:\n assert pos_index is not None\n z_PM = z_PTM[:, pos_index[0]]\n z_P1M = jnp.reshape(z_PM, (B * N, 1, M))\n z_P1M = self.temporal_attention(z_P1M)\n z_PM = jnp.reshape(z_P1M, (B * N, M))\n z_PTM = z_PTM.at[:, pos_index[0], :].set(z_PM)\n else:\n z_PTM = self.temporal_attention(z_PTM)\n z_BTNM = einops.rearrange(z_PTM, ""(b n) t m -> b t n m"", n=N)\n x_BTNM = x_BTNM + z_BTNM\n # --- Feedforward ---\n z_BTNM = self.ffn_norm(x_BTNM)\n z_BTND = self.ffn_dense1(z_BTNM)\n z_BTND = jax.nn.gelu(z_BTND)\n z_BTNM = self.ffn_dense2(z_BTND)\n x_BTNM = x_BTNM + z_BTNM\n\n return x_BTNM\n\nclass Transformer(nnx.Module):\n """"""\n Dimension keys:\n B: batch size\n T: number of frames\n N: number of patches per frame\n I: number of input features\n M: model dimension\n D: FFN dimension\n O: number of output features\n F: number of frames in batch\n P: number of patch positions in batch\n """"""\n def __init__(\n self,\n input_dim: int,\n model_dim: int,\n ffn_dim: int,\n out_dim: int,\n num_blocks: int,\n num_heads: int,\n dropout: float,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n decode: bool,\n rngs: nnx.Rngs,\n ):\n self.input_dim = input_dim\n self.model_dim = model_dim\n self.ffn_dim = ffn_dim\n self.out_dim = out_dim\n self.num_blocks = num_blocks\n self.num_heads = num_heads\n self.dropout = dropout\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n\n self.pos_enc = PositionalEncoding(self.model_dim)\n self.input_norm1 = nnx.LayerNorm(\n num_features=self.input_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.input_dense = nnx.Linear(\n in_features=self.input_dim,\n out_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n self.input_norm2 = nnx.LayerNorm(\n num_features=self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n self.blocks: List[TransformerBlock] = []\n for _ in range(self.num_blocks):\n self.blocks.append(\n TransformerBlock(\n model_dim=self.model_dim,\n ffn_dim=self.ffn_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n decode=decode,\n rngs=rngs,\n )\n )\n self.output_dense = 
nnx.Linear(\n in_features=self.model_dim,\n out_features=self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n rngs=rngs,\n )\n\n def __call__(self, x_BTNI: jax.Array, pos_index: Tuple[jax.Array, jax.Array] | None = None) -> jax.Array:\n x_BTNI = self.input_norm1(x_BTNI)\n x_BTNM = self.input_dense(x_BTNI)\n x_BTNM = self.input_norm2(x_BTNM)\n x_BTNM = self.pos_enc(x_BTNM)\n\n for block in self.blocks:\n x_BTNM = block(x_BTNM, pos_index)\n\n x_BTNV = self.output_dense(x_BTNM)\n return x_BTNV\n\ndef normalize(x: jax.Array) -> jax.Array:\n return x / (jnp.linalg.norm(x, ord=2, axis=-1, keepdims=True) + 1e-8)\n\n\nclass VectorQuantizer(nnx.Module):\n """"""\n Dimension keys:\n D: B * T * N\n K: number of latents\n L: latent dimension\n """"""\n def __init__(\n self, latent_dim: int, num_latents: int, dropout: float, rngs: nnx.Rngs\n ):\n self.latent_dim = latent_dim\n self.num_latents = num_latents\n self.dropout = dropout\n\n self.codebook = nnx.Param(\n normalize(\n nnx.initializers.lecun_uniform()(\n rngs.params(), (self.num_latents, self.latent_dim)\n )\n )\n )\n self.drop = nnx.Dropout(self.dropout, rngs=rngs)\n\n def __call__(\n self, x_DL: jax.Array, training: bool\n ) -> Tuple[jax.Array, jax.Array, jax.Array, jax.Array]:\n # --- Compute distances ---\n x_DL = normalize(x_DL)\n normalized_codebook_KL = normalize(self.codebook.value)\n distance_DK = -jnp.matmul(x_DL, normalized_codebook_KL.T)\n if training:\n distance_DK = self.drop(distance_DK)\n\n # --- Get indices and embeddings ---\n indices_D = jnp.argmin(distance_DK, axis=-1)\n z_DL = self.codebook[indices_D]\n\n # --- Straight through estimator ---\n z_q_DL = x_DL + jax.lax.stop_gradient(z_DL - x_DL)\n return z_q_DL, z_DL, x_DL, indices_D\n\n def get_codes(self, indices_E: jax.Array) -> jax.Array:\n return self.codebook[indices_E]\n\n\ndef _create_flash_attention_fn(use_flash_attention: bool, is_causal: bool) -> Callable:\n """"""\n Create an attention function that uses flash attention if enabled.\n\n flax.nnx.MultiHeadAttention provides tensors with shape (batch..., length, num_heads, head_dim),\n but jax.nn.dot_product_attention expects (batch, length, num_heads, head_dim). We reshape to\n ensure compatibility. cuDNN's flash attention additionally requires a sequence length that\n is a multiple of 4. We pad the sequence length to the nearest multiple of 4 and mask\n accordingly. Note that cuDNN requires the mask to be broadcast before calling the attention\n function due to strict shape checking.\n """"""\n\n # FIXME (f.srambical): keys and values could have different dimensionalities\n def attention_fn(query_BSHD, key_BSHD, value_BSHD, bias=None, mask_B111=None, **kwargs):\n implementation = ""cudnn"" if use_flash_attention else None\n\n def _merge_batch_dims(x):\n return einops.rearrange(x, ""... l h k -> (...) 
l h k"")\n\n def _pad(x):\n return jnp.pad(x, ((0, 0), (0, pad_size), (0, 0), (0, 0)))\n\n original_shape = query_BSHD.shape\n original_seq_len = query_BSHD.shape[-3]\n\n # Pad to nearest multiple of 4\n T = ((original_seq_len + 3) // 4) * 4\n pad_size = T - original_seq_len\n\n query_BTHD = _pad(_merge_batch_dims(query_BSHD))\n key_BTHD = _pad(_merge_batch_dims(key_BSHD))\n value_BTHD = _pad(_merge_batch_dims(value_BSHD))\n B = query_BTHD.shape[0]\n\n attention_mask = jnp.ones((T, T), dtype=jnp.bool_)\n attention_mask = attention_mask.at[original_seq_len:, :].set(False)\n attention_mask = attention_mask.at[:, original_seq_len:].set(False)\n\n # Handle causal mask for cached decoder self-attention (from nnx.MultiHeadAttention)\n if mask_B111 is not None:\n mask_B111 = _merge_batch_dims(mask_B111)\n # We need to broadcast T and S dimensions to target_seq_len since cudnn attention strictly checks the mask shape\n # https://github.com/jax-ml/jax/issues/28974\n # https://github.com/jax-ml/jax/blob/08c7677393672ccb85c10f1ed0bd506905c3c994/jax/_src/cudnn/fused_attention_stablehlo.py#L1830\n # https://github.com/jax-ml/jax/blob/08c7677393672ccb85c10f1ed0bd506905c3c994/jax/_src/cudnn/fused_attention_stablehlo.py#L337\n mask_B1QK = einops.repeat(mask_B111, ""... 1 1 -> ... t s"", t=T, s=T)\n mask_B1QK = mask_B111.astype(jnp.bool)\n else:\n mask_11QK = attention_mask[jnp.newaxis, jnp.newaxis, :, :]\n mask_B1QK = jnp.broadcast_to(mask_11QK, (B, 1, T, T))\n\n bias_4d = _pad(_merge_batch_dims(bias)) if bias is not None else None\n\n # NOTE: jax.nn.dot_product_attention does not support dropout\n output_4d = jax.nn.dot_product_attention(\n query=query_BTHD,\n key=key_BTHD,\n value=value_BTHD,\n bias=bias_4d,\n mask=mask_B1QK,\n implementation=implementation,\n is_causal=is_causal,\n )\n return output_4d[..., :original_seq_len, :, :].reshape(original_shape)\n\n return attention_fn\n",python,tab
2,333,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"4:31:17 PM [info] Activating crowd-code\n4:31:17 PM [info] Recording started\n4:31:17 PM [info] Initializing git provider using file system watchers...\n",Log,tab
3,381,"extension-output-pdoom-org.crowd-code-#1-crowd-code",150,0,"4:31:17 PM [info] Git repository found\n4:31:17 PM [info] Git provider initialized successfully\n4:31:17 PM [info] Initial git state: [object Object]\n",Log,content
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-6a683910-8d55-4299-9066-894bbed6c97c1754399347661-2025_08_05-15.09.15.958/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-6cc07fef-c3ba-4457-9c7c-5fe5347d53561751545506463-2025_07_03-14.26.06.366/source.csv
ADDED
@@ -0,0 +1,283 @@
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
1,4,"generate_arrayrecord_dataset.py",0,0,"#!/usr/bin/env python3\n""""""\nGenerate ArrayRecord dataset compatible with train_tokenizer.py\n\nThis script creates synthetic video data and saves it in ArrayRecord format\nthat can be used by the tokenizer training script.\n""""""\n\nimport os\nimport pickle\nimport numpy as np\nimport grain\nfrom array_record.python.array_record_module import ArrayRecordWriter\nimport argparse\nimport multiprocessing as mp\nfrom functools import partial\nimport time\n\n\ndef generate_synthetic_video(\n seq_len: int = 16,\n height: int = 90,\n width: int = 160,\n channels: int = 3\n) -> np.ndarray:\n """"""\n Generate synthetic video data with random frames for training.\n \n Args:\n seq_len: Number of frames in the video sequence\n height: Height of each frame\n width: Width of each frame\n channels: Number of color channels\n \n Returns:\n Video array of shape (seq_len, height, width, channels)\n """"""\n video = np.random.rand(seq_len, height, width, channels).astype(np.float32)\n \n return video\n\n\ndef create_single_arrayrecord_file(\n file_info: tuple,\n output_dir: str,\n seq_len: int,\n height: int,\n width: int,\n channels: int,\n records_per_file: int,\n seed: int\n) -> tuple:\n """"""\n Create a single ArrayRecord file with synthetic video data.\n \n Args:\n file_info: Tuple of (file_idx, start_idx, end_idx)\n output_dir: Directory to save ArrayRecord files\n seq_len: Number of frames per video\n height: Frame height\n width: Frame width\n channels: Number of color channels\n records_per_file: Number of records per ArrayRecord file\n seed: Random seed for reproducibility\n \n Returns:\n Tuple of (file_path, num_videos_created, success)\n """"""\n file_idx, start_idx, end_idx = file_info\n videos_in_file = end_idx - start_idx\n \n # Set seed for this process (add file_idx to make each file different)\n np.random.seed(seed + file_idx)\n \n file_path = os.path.join(output_dir, f""videos_{file_idx:04d}.array_record"")\n \n try:\n writer = ArrayRecordWriter(file_path, ""group_size:1"")\n \n for video_idx in range(videos_in_file):\n video = generate_synthetic_video(seq_len, height, width, channels)\n \n # Convert to uint8 format as expected by the dataloader\n video_uint8 = (video * 255).astype(np.uint8)\n \n # Create record in the format expected by ProcessEpisodeAndSlice\n record = {\n ""raw_video"": video_uint8.tobytes(),\n ""sequence_length"": seq_len\n }\n \n writer.write(pickle.dumps(record))\n \n writer.close()\n return (file_path, videos_in_file, True)\n \n except Exception as e:\n print(f""Error creating file {file_path}: {e}"")\n return (file_path, 0, False)\n\n\ndef create_arrayrecord_dataset(\n output_dir: str,\n num_videos: int = 1000,\n seq_len: int = 16,\n height: int = 90,\n width: int = 160,\n channels: int = 3,\n records_per_file: int = 100,\n seed: int = 42,\n num_processes: int | None = None\n):\n """"""\n Create ArrayRecord dataset with synthetic video data using multiprocessing.\n \n Args:\n output_dir: Directory to save ArrayRecord files\n num_videos: Total number of videos to generate\n seq_len: Number of frames per video\n height: Frame height\n width: Frame width\n channels: Number of color channels\n records_per_file: Number of records per ArrayRecord file\n seed: Random seed for reproducibility\n num_processes: Number of processes to use (None for auto-detect)\n """"""\n os.makedirs(output_dir, exist_ok=True)\n \n num_files = (num_videos + records_per_file - 1) // records_per_file\n \n print(f""Generating {num_videos} videos 
across {num_files} ArrayRecord files..."")\n print(f""Each file will contain up to {records_per_file} videos"")\n print(f""Video dimensions: {seq_len} frames × {height}×{width}×{channels}"")\n \n # Prepare file information for each worker\n file_infos = []\n for file_idx in range(num_files):\n start_idx = file_idx * records_per_file\n end_idx = min((file_idx + 1) * records_per_file, num_videos)\n file_infos.append((file_idx, start_idx, end_idx))\n \n # Set number of processes (use CPU count if not specified)\n if num_processes is None:\n num_processes = min(mp.cpu_count(), num_files)\n \n print(f""Using {num_processes} processes for parallel generation..."")\n \n start_time = time.time()\n \n # Create partial function with fixed arguments\n worker_func = partial(\n create_single_arrayrecord_file,\n output_dir=output_dir,\n seq_len=seq_len,\n height=height,\n width=width,\n channels=channels,\n records_per_file=records_per_file,\n seed=seed\n )\n \n # Use multiprocessing to create files in parallel\n with mp.Pool(processes=num_processes) as pool:\n results = pool.map(worker_func, file_infos)\n \n end_time = time.time()\n \n # Process results\n total_records = 0\n successful_files = 0\n \n for file_path, num_videos_created, success in results:\n if success:\n print(f""✓ Created {file_path} with {num_videos_created} videos"")\n total_records += num_videos_created\n successful_files += 1\n else:\n print(f""✗ Failed to create {file_path}"")\n \n print(f""\nDataset generation complete!"")\n print(f""Total videos generated: {total_records}"")\n print(f""Successful files: {successful_files}/{num_files}"")\n print(f""Files created in: {output_dir}"")\n print(f""Generation time: {end_time - start_time:.2f} seconds"")\n\n\ndef verify_dataset(output_dir: str, num_samples: int = 5):\n """"""\n Verify the generated dataset using Grain's ArrayRecordDataSource.\n \n Args:\n output_dir: Directory containing ArrayRecord files\n num_samples: Number of samples to verify\n """"""\n print(f""\nVerifying dataset in {output_dir}..."")\n \n # Find all ArrayRecord files\n array_record_files = [\n os.path.join(output_dir, f) \n for f in os.listdir(output_dir) \n if f.endswith('.array_record')\n ]\n \n if not array_record_files:\n print(""No ArrayRecord files found!"")\n return\n \n print(f""Found {len(array_record_files)} ArrayRecord files"")\n \n # Use Grain's ArrayRecordDataSource as shown in the documentation\n try:\n data_source = grain.sources.ArrayRecordDataSource(array_record_files[0])\n print(f""Number of records in first file: {len(data_source)}"")\n \n # Load and verify a few samples\n for i in range(min(num_samples, len(data_source))):\n record_bytes = data_source[i]\n record = pickle.loads(record_bytes)\n \n # Reconstruct video from raw_video bytes\n video_shape = (record[""sequence_length""], 90, 160, 3) # Hardcoded for now\n video = np.frombuffer(record[""raw_video""], dtype=np.uint8).reshape(video_shape)\n \n print(f"" Record {i}: video shape = {video.shape}, dtype = {video.dtype}"")\n print(f"" Value range: [{video.min()}, {video.max()}]"")\n print(f"" Mean: {video.mean():.1f}"")\n print(f"" Sequence length: {record['sequence_length']}"")\n \n except Exception as e:\n print(f""Error reading ArrayRecord file: {e}"")\n print(""This might indicate a file format issue."")\n\n\ndef main():\n parser = argparse.ArgumentParser(description=""Generate ArrayRecord dataset for tokenizer training"")\n parser.add_argument(""--output_dir"", type=str, default=""data_arrayrecord/dummy"",\n help=""Output directory for 
ArrayRecord files"")\n parser.add_argument(""--num_videos"", type=int, default=1000,\n help=""Total number of videos to generate"")\n parser.add_argument(""--seq_len"", type=int, default=16,\n help=""Number of frames per video"")\n parser.add_argument(""--height"", type=int, default=90,\n help=""Frame height"")\n parser.add_argument(""--width"", type=int, default=160,\n help=""Frame width"")\n parser.add_argument(""--channels"", type=int, default=3,\n help=""Number of color channels"")\n parser.add_argument(""--records_per_file"", type=int, default=100,\n help=""Number of records per ArrayRecord file"")\n parser.add_argument(""--seed"", type=int, default=42,\n help=""Random seed for reproducibility"")\n parser.add_argument(""--num_processes"", type=int, default=None,\n help=""Number of processes to use (default: auto-detect)"")\n parser.add_argument(""--verify"", action=""store_true"",\n help=""Verify the generated dataset"")\n \n args = parser.parse_args()\n \n # Generate the dataset\n create_arrayrecord_dataset(\n output_dir=args.output_dir,\n num_videos=args.num_videos,\n seq_len=args.seq_len,\n height=args.height,\n width=args.width,\n channels=args.channels,\n records_per_file=args.records_per_file,\n seed=args.seed,\n num_processes=args.num_processes\n )\n \n # Verify if requested\n if args.verify:\n verify_dataset(args.output_dir)\n \nif __name__ == ""__main__"":\n main()\n",python,tab
2,857,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"2:26:06 PM [info] Activating crowd-code\n2:26:06 PM [info] Recording started\n2:26:06 PM [info] Initializing git provider using file system watchers...\n",Log,tab
3,1273,"extension-output-pdoom-org.crowd-code-#1-crowd-code",150,0,"2:26:06 PM [info] Git repository found\n2:26:06 PM [info] Git provider initialized successfully\n2:26:07 PM [info] Initial git state: [object Object]\n",Log,content
4,11151,"TERMINAL",0,0,"/usr/bin/python3 /ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt",,terminal_command
5,11204,"TERMINAL",0,0,"]633;E;/usr/bin/python3 /ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt;f19a05cb-78d0-4a81-8f50-34faa4bc2da0]633;C",,terminal_output
6,11340,"TERMINAL",0,0,"]0;franz.srambical@hpc-submit02:/ictstr01/home/aih/franz.srambical/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash]633;D;0",,terminal_output
7,886202,"generate_arrayrecord_dataset.py",0,0,"",python,tab
8,890778,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
9,892360,"generate_arrayrecord_dataset.py",0,0,"",python,tab
10,894839,"TERMINAL",0,0,"",,terminal_focus
11,920424,"TERMINAL",0,0,"salloc --reservation=haicu_stefan -p gpu_p --time=05:00:00 --job-name=interactive_bash --qos=gpu_normal --gres=gpu:2 -w gpusrv69,gpusrv70 --cpus-per-task=8",,terminal_command
12,920476,"TERMINAL",0,0,"]633;E;salloc --reservation=haicu_stefan -p gpu_p --time=05:00:00 --job-name=interactive_bash --qos=gpu_normal --gres=gpu:2 -w gpusrv69,gpusrv70 --cpus-per-task=8;b5dce3d8-bcc4-44ac-80e5-c959aa4597d3]633;C",,terminal_output
13,921927,"TERMINAL",0,0,"salloc: Granted job allocation 26644011\r\n",,terminal_output
14,922030,"TERMINAL",0,0,"salloc: Nodes gpusrv[69-70] are ready for job\r\n",,terminal_output
15,922403,"TERMINAL",0,0,"]0;franz.srambical@hpc-submit02:/lustre/groups/haicu/workspace/franz.srambical/jafar[?2004h[franz.srambical@gpusrv69 jafar]$ ",,terminal_output
16,927055,"TERMINAL",0,0,"",,terminal_focus
17,927129,"TERMINAL",0,0,"\r[K[franz.srambical@gpusrv69 jafar]$ \r[K[franz.srambical@gpusrv69 jafar]$ \r[K[franz.srambical@gpusrv69 jafar]$ ",,terminal_output
18,929329,"TERMINAL",0,0,"squeue --me",,terminal_command
19,929379,"TERMINAL",0,0,"\r\n[?2004l\r]633;E;squeue --me;4c3d9cf3-b25c-4150-bc4d-a9e63f089437]633;C",,terminal_output
20,929566,"TERMINAL",0,0," JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON)\r\n 26644011 gpu_p interact franz.sr R 0:09 2 gpusrv[69-70]\r\n]0;franz.srambical@hpc-submit02:/lustre/groups/haicu/workspace/franz.srambical/jafar]633;D;0",,terminal_output
21,935053,"TERMINAL",0,0,"srun",,terminal_focus
22,948254,"generate_arrayrecord_dataset.py",9202,0,"",python,selection_command
23,948388,"generate_arrayrecord_dataset.py",8306,0,"",python,selection_command
24,948546,"generate_arrayrecord_dataset.py",6926,0,"",python,selection_command
25,948662,"generate_arrayrecord_dataset.py",6026,0,"",python,selection_command
26,948820,"generate_arrayrecord_dataset.py",5145,0,"",python,selection_command
27,948956,"generate_arrayrecord_dataset.py",4369,0,"",python,selection_command
28,949094,"generate_arrayrecord_dataset.py",3265,0,"",python,selection_command
29,949227,"generate_arrayrecord_dataset.py",2598,0,"",python,selection_command
30,961387,"generate_arrayrecord_dataset.py",2577,0,"",python,selection_mouse
31,965056,".gitignore",0,0,"*.pyc\n*.npy\n*.png\n*.gif\n*.tfrecord\n\nwandb_key\ncheckpoints/\nwandb/\n__pycache__/\nvs-code-recorder/\n",ignore,tab
32,965498,".gitignore",6,0,"",ignore,selection_command
33,965818,".gitignore",12,0,"",ignore,selection_command
34,965819,".gitignore",18,0,"",ignore,selection_command
35,965820,".gitignore",24,0,"",ignore,selection_command
36,965824,".gitignore",35,0,"",ignore,selection_command
37,965857,".gitignore",36,0,"",ignore,selection_command
38,965890,".gitignore",46,0,"",ignore,selection_command
39,965924,".gitignore",59,0,"",ignore,selection_command
40,965958,".gitignore",66,0,"",ignore,selection_command
41,965995,".gitignore",79,0,"",ignore,selection_command
42,966175,".gitignore",97,0,"",ignore,selection_command
43,971038,".gitignore",97,0,"d",ignore,content
44,971039,".gitignore",98,0,"",ignore,selection_keyboard
45,971119,".gitignore",98,0,"a",ignore,content
46,971120,".gitignore",99,0,"",ignore,selection_keyboard
47,971211,".gitignore",99,0,"t",ignore,content
48,971211,".gitignore",100,0,"",ignore,selection_keyboard
49,971270,".gitignore",100,0,"a",ignore,content
50,971271,".gitignore",101,0,"",ignore,selection_keyboard
51,971605,".gitignore",101,0,"_",ignore,content
52,971606,".gitignore",102,0,"",ignore,selection_keyboard
53,971794,".gitignore",102,0,"a",ignore,content
54,971795,".gitignore",103,0,"",ignore,selection_keyboard
55,971934,".gitignore",103,0,"r",ignore,content
56,971935,".gitignore",104,0,"",ignore,selection_keyboard
57,972020,".gitignore",104,0,"r",ignore,content
58,972021,".gitignore",105,0,"",ignore,selection_keyboard
59,972118,".gitignore",105,0,"a",ignore,content
60,972119,".gitignore",106,0,"",ignore,selection_keyboard
61,972276,".gitignore",106,0,"y",ignore,content
62,972277,".gitignore",107,0,"",ignore,selection_keyboard
63,972670,".gitignore",107,0,"r",ignore,content
64,972671,".gitignore",108,0,"",ignore,selection_keyboard
65,972782,".gitignore",108,0,"e",ignore,content
66,972783,".gitignore",109,0,"",ignore,selection_keyboard
67,973180,".gitignore",109,0,"cord/",ignore,content
68,973361,".gitignore",113,0,"",ignore,selection_command
69,976836,"generate_arrayrecord_dataset.py",0,0,"",python,tab
70,977502,"generate_arrayrecord_dataset.py",0,0,"",python,tab
71,978906,"generate_arrayrecord_dataset.py",0,0,"",python,tab
72,978910,".gitignore",0,0,"",ignore,tab
73,981026,"train_tokenizer.py",0,0,"from dataclasses import dataclass, field\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n checkpoint: str = """"\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 3e-4\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_tokenizer""\n tags: list[str] = field(default_factory=lambda: [""tokenizer""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n time_measurement_interval: int = 50\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\[email protected]\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise 
ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(\n entity=args.entity,\n project=args.project,\n name=args.name,\n tags=args.tags,\n group=""debug"",\n config=args\n )\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer_<timestamp>_<step>\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n array_record_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n num_workers=8,\n # FIXME: try buffer size of 16\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n print(f""Starting training from step {step}..."")\n start_time = time.time()\n \n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout = jax.random.split(rng, 3)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(videos=videos, rng=_rng, dropout_rng=_rng_dropout)\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n if step % args.time_measurement_interval == 0:\n jax.block_until_ready(train_state)\n elapsed_time = (time.time() - start_time)\n avg_step_time = elapsed_time / args.time_measurement_interval\n print(f""Step {step}, loss: {loss}, avg step time: 
{avg_step_time:.2f}s"")\n if args.log and jax.process_index() == 0:\n wandb.log({""avg_step_time_s"": avg_step_time, ""step"": step})\n start_time = time.time()\n else:\n print(f""Step {step}, loss: {loss}"")\n\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""tokenizer_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab
74,981028,"train_tokenizer.py",1544,0,"",python,selection_command
75,981908,"train_tokenizer.py",0,0,"from dataclasses import dataclass, field\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n checkpoint: str = """"\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 3e-4\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_tokenizer""\n tags: list[str] = field(default_factory=lambda: [""tokenizer""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\[email protected]\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n 
print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(\n entity=args.entity,\n project=args.project,\n name=args.name,\n tags=args.tags,\n group=""debug"",\n config=args\n )\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer_<timestamp>_<step>\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n array_record_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n num_workers=16,\n prefetch_buffer_size=2,\n seed=args.seed,\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout = jax.random.split(rng, 3)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(videos=videos, rng=_rng, dropout_rng=_rng_dropout)\n start_time = time.time()\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n elapsed_time = (time.time() - start_time) * 1000\n print(f""Step {step}, loss: {loss}, step time: {elapsed_time}ms"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n ""step_time_ms"": elapsed_time,\n **metrics,\n 
}\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""tokenizer_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab
76,981910,"train_tokenizer.py",1511,0,"",python,selection_mouse
77,981922,"train_tokenizer.py",1510,0,"",python,selection_command
78,983151,"train_tokenizer.py",2328,0,"",python,selection_command
79,983839,"train_tokenizer.py",3215,0,"",python,selection_command
80,984087,"train_tokenizer.py",4035,0,"",python,selection_command
81,984118,"train_tokenizer.py",4850,0,"",python,selection_command
82,984151,"train_tokenizer.py",5926,0,"",python,selection_command
83,984182,"train_tokenizer.py",6754,0,"",python,selection_command
84,984216,"train_tokenizer.py",7582,0,"",python,selection_command
85,984321,"train_tokenizer.py",8600,0,"",python,selection_command
86,984322,"train_tokenizer.py",9570,0,"",python,selection_command
87,985205,"train_tokenizer.py",0,0,"",python,selection_command
88,985604,"train_tokenizer.py",583,0,"",python,selection_command
89,985771,"train_tokenizer.py",1212,0,"",python,selection_command
90,985994,"train_tokenizer.py",1877,0,"",python,selection_command
91,986119,"train_tokenizer.py",2819,0,"",python,selection_command
92,986294,"train_tokenizer.py",3725,0,"",python,selection_command
93,986457,"train_tokenizer.py",4532,0,"",python,selection_command
94,986617,"train_tokenizer.py",5551,0,"",python,selection_command
95,986778,"train_tokenizer.py",6420,0,"",python,selection_command
96,987528,"train_tokenizer.py",0,0,"",python,selection_command
97,991567,"train_tokenizer.py",0,0,"",python,tab
98,991704,"experiments/tokenizer_time_measurement_grain.sh",0,0,"#!/usr/bin/env bash\nsource .venv/bin/activate\n\n\ndata_dir='data_arrayrecord/dummy'\n\n# FIXME: the `time_measurement_interval` line is only for debugging and should be removed\nsrun python train_tokenizer.py \\n --time_measurement_interval 10 \\n --batch_size 48 \\n --num_steps 300000 \\n --warmup_steps 10000 \\n --seed 0 \\n --min_lr=0.0000866 \\n --max_lr=0.0000866 \\n --data_dir $data_dir",shellscript,tab
99,994012,"utils/dataloader.py",0,0,"import jax\nimport numpy as np\nimport grain\nfrom typing import Any, Optional\nfrom array_record.python.array_record_module import ArrayRecordWriter\nimport tensorflow as tf\nimport os\nfrom pathlib import Path\nimport pickle\nimport multiprocessing as mp\nfrom functools import partial\nimport logging\n\ngrain.config.update(""py_debug_mode"", True)\n\n# Configure logging to see debug output\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\ndef _convert_single_tfrecord(\n tfrecord_file: Path,\n output_folder: str,\n feature_description: dict,\n) -> str:\n """"""\n Convert a single TFRecord file to ArrayRecord format.\n \n Args:\n tfrecord_file: Path to the TFRecord file\n output_folder: Output folder for the ArrayRecord file\n feature_description: Dictionary describing TFRecord features\n \n Returns:\n Path to the created ArrayRecord file\n """"""\n output_filename = tfrecord_file.stem + "".array_record""\n output_file = os.path.join(output_folder, output_filename)\n \n dataset = tf.data.TFRecordDataset(str(tfrecord_file))\n \n def parse_tfrecord(example_proto):\n """"""Parse a single TFRecord example.""""""\n parsed_features = tf.io.parse_single_example(example_proto, feature_description)\n raw_video_bytes = parsed_features['raw_video'].numpy()\n sequence_length = int(parsed_features['sequence_length'].numpy())\n \n return {\n 'raw_video': raw_video_bytes,\n 'sequence_length': sequence_length,\n }\n \n record_count = 0\n writer = ArrayRecordWriter(output_file, ""group_size:1"")\n for record in dataset:\n parsed_record = parse_tfrecord(record)\n writer.write(pickle.dumps(parsed_record))\n record_count += 1\n writer.close()\n \n print(f""Converted {tfrecord_file.name} -> {output_filename}: {record_count} records"")\n return output_file\n\n\ndef convert_tfrecords_to_arrayrecords(\n tfrecord_folder: str,\n output_folder: str,\n feature_description: Optional[dict] = None,\n num_workers: Optional[int] = None,\n):\n """"""\n Converts TFRecord files to ArrayRecord format for use with Grain.\n Creates one ArrayRecord file per TFRecord file using multiprocessing.\n \n Args:\n tfrecord_folder: Path to folder containing TFRecord files\n output_folder: Path to output folder for ArrayRecord files\n feature_description: Dictionary describing TFRecord features. If None,\n uses default description for video data.\n num_workers: Number of worker processes. If None, uses CPU count.\n \n Returns:\n List of paths to created ArrayRecord files\n """"""\n if feature_description is None:\n feature_description = {\n 'raw_video': tf.io.FixedLenFeature([], tf.string),\n 'sequence_length': tf.io.FixedLenFeature([], tf.int64),\n }\n \n os.makedirs(output_folder, exist_ok=True)\n \n tfrecord_files = list(Path(tfrecord_folder).glob(""*.tfrecord""))\n if not tfrecord_files:\n raise ValueError(f""No TFRecord files found in {tfrecord_folder}"")\n \n print(f""Found {len(tfrecord_files)} TFRecord files"")\n \n if num_workers is None:\n num_workers = min(mp.cpu_count(), len(tfrecord_files))\n \n print(f""Using {num_workers} worker processes for conversion"")\n \n convert_func = partial(\n _convert_single_tfrecord,\n output_folder=output_folder,\n feature_description=feature_description\n )\n \n with mp.Pool(processes=num_workers) as pool:\n arrayrecord_files = pool.map(convert_func, tfrecord_files)\n \n print(f""Conversion complete! 
Created {len(arrayrecord_files)} ArrayRecord files"")\n return arrayrecord_files\n\n\nclass ProcessEpisodeAndSlice(grain.transforms.RandomMap):\n """"""\n A Grain Transformation that combines parsing, slicing, and normalizing.\n """"""\n\n def __init__(self, seq_len: int, image_h: int, image_w: int, image_c: int):\n """"""Initializes the transformation with processing parameters.""""""\n self.seq_len = seq_len\n self.image_h = image_h\n self.image_w = image_w\n self.image_c = image_c\n\n def random_map(self, element: dict, rng: np.random.Generator) -> Any:\n """"""\n Processes a single raw episode from the data source.\n\n Args:\n element: A dictionary representing one record from the DataSource.\n Expected to contain 'raw_video' (bytes) and 'sequence_length' (int)\n rng: A per-record random number generator provided by the Grain sampler.\n\n Returns:\n A processed video sequence as a NumPy array with shape\n (seq_len, height, width, channels) and dtype float32.\n """"""\n assert isinstance(element, bytes)\n element = pickle.loads(element)\n \n video_shape = (\n element[""sequence_length""],\n self.image_h,\n self.image_w,\n self.image_c,\n )\n episode_tensor = np.frombuffer(element[""raw_video""], dtype=np.uint8)\n episode_tensor = episode_tensor.reshape(video_shape)\n\n current_episode_len = episode_tensor.shape[0]\n if current_episode_len < self.seq_len:\n raise ValueError(f""An episode has length {current_episode_len}, which is ""\n f""shorter than the requested sequence length {self.seq_len}."")\n \n max_start_idx = current_episode_len - self.seq_len\n \n start_idx = rng.integers(0, max_start_idx + 1)\n\n seq = episode_tensor[start_idx : start_idx + self.seq_len]\n\n processed_sequence = seq.astype(np.float32) / 255.0\n\n return processed_sequence\n\n\ndef get_dataloader(\n array_record_paths: list[str],\n seq_len: int,\n global_batch_size: int,\n image_h: int,\n image_w: int,\n image_c: int,\n num_workers: int = 1,\n prefetch_buffer_size: int = 1,\n seed: int = 42,\n):\n """"""\n Creates a data loading pipeline using Grain.\n """"""\n if not array_record_paths:\n raise ValueError(""array_record_paths list cannot be empty."")\n\n num_processes = jax.process_count()\n\n if global_batch_size % num_processes != 0:\n raise ValueError(\n f""Global batch size {global_batch_size} must be divisible by ""\n f""the number of JAX processes {num_processes} for proper sharding.""\n )\n per_process_batch_size = global_batch_size // num_processes\n\n source = grain.sources.ArrayRecordDataSource(array_record_paths)\n \n sampler = grain.samplers.SequentialSampler(\n num_records=len(source),\n shard_options=grain.sharding.ShardByJaxProcess(drop_remainder=True),\n # FIXME: check whether the global shuffle is the reason why the dataloader is so slow\n # shuffle=False,\n # num_epochs=100, # FIXME: is there an equivalent to tf.data.repeat(None)?\n seed=seed,\n )\n\n operations = [\n ProcessEpisodeAndSlice(\n seq_len=seq_len, image_h=image_h, image_w=image_w, image_c=image_c\n ),\n grain.transforms.Batch(batch_size=per_process_batch_size, drop_remainder=True),\n ]\n\n read_options = grain.ReadOptions(\n prefetch_buffer_size=prefetch_buffer_size,\n # FIXME: `If the data is already loaded in memory, we recommend setting this to 0 to\n # avoid Python GIL contention by multiple threads.`\n num_threads=1,\n )\n dataloader = grain.DataLoader(\n data_source=source,\n sampler=sampler,\n operations=operations,\n worker_count=num_workers,\n # FIXME: think about whether we should tune this\n worker_buffer_size=1,\n 
read_options=read_options,\n )\n\n return iter(dataloader)\n\n",python,tab
100,994120,"utils/dataloader.py",278,0,"",python,selection_command
101,999045,"utils/dataloader.py",278,6652,"\n\n\ndef _convert_single_tfrecord(\n tfrecord_file: Path,\n output_folder: str,\n feature_description: dict,\n) -> str:\n """"""\n Convert a single TFRecord file to ArrayRecord format.\n \n Args:\n tfrecord_file: Path to the TFRecord file\n output_folder: Output folder for the ArrayRecord file\n feature_description: Dictionary describing TFRecord features\n \n Returns:\n Path to the created ArrayRecord file\n """"""\n output_filename = tfrecord_file.stem + "".array_record""\n output_file = os.path.join(output_folder, output_filename)\n \n dataset = tf.data.TFRecordDataset(str(tfrecord_file))\n \n def parse_tfrecord(example_proto):\n """"""Parse a single TFRecord example.""""""\n parsed_features = tf.io.parse_single_example(example_proto, feature_description)\n raw_video_bytes = parsed_features['raw_video'].numpy()\n sequence_length = int(parsed_features['sequence_length'].numpy())\n \n return {\n 'raw_video': raw_video_bytes,\n 'sequence_length': sequence_length,\n }\n \n record_count = 0\n writer = ArrayRecordWriter(output_file, ""group_size:1"")\n for record in dataset:\n parsed_record = parse_tfrecord(record)\n writer.write(pickle.dumps(parsed_record))\n record_count += 1\n writer.close()\n \n print(f""Converted {tfrecord_file.name} -> {output_filename}: {record_count} records"")\n return output_file\n\n\ndef convert_tfrecords_to_arrayrecords(\n tfrecord_folder: str,\n output_folder: str,\n feature_description: Optional[dict] = None,\n num_workers: Optional[int] = None,\n):\n """"""\n Converts TFRecord files to ArrayRecord format for use with Grain.\n Creates one ArrayRecord file per TFRecord file using multiprocessing.\n \n Args:\n tfrecord_folder: Path to folder containing TFRecord files\n output_folder: Path to output folder for ArrayRecord files\n feature_description: Dictionary describing TFRecord features. If None,\n uses default description for video data.\n num_workers: Number of worker processes. If None, uses CPU count.\n \n Returns:\n List of paths to created ArrayRecord files\n """"""\n if feature_description is None:\n feature_description = {\n 'raw_video': tf.io.FixedLenFeature([], tf.string),\n 'sequence_length': tf.io.FixedLenFeature([], tf.int64),\n }\n \n os.makedirs(output_folder, exist_ok=True)\n \n tfrecord_files = list(Path(tfrecord_folder).glob(""*.tfrecord""))\n if not tfrecord_files:\n raise ValueError(f""No TFRecord files found in {tfrecord_folder}"")\n \n print(f""Found {len(tfrecord_files)} TFRecord files"")\n \n if num_workers is None:\n num_workers = min(mp.cpu_count(), len(tfrecord_files))\n \n print(f""Using {num_workers} worker processes for conversion"")\n \n convert_func = partial(\n _convert_single_tfrecord,\n output_folder=output_folder,\n feature_description=feature_description\n )\n \n with mp.Pool(processes=num_workers) as pool:\n arrayrecord_files = pool.map(convert_func, tfrecord_files)\n \n print(f""Conversion complete! 
Created {len(arrayrecord_files)} ArrayRecord files"")\n return arrayrecord_files\n\n\nclass ProcessEpisodeAndSlice(grain.transforms.RandomMap):\n """"""\n A Grain Transformation that combines parsing, slicing, and normalizing.\n """"""\n\n def __init__(self, seq_len: int, image_h: int, image_w: int, image_c: int):\n """"""Initializes the transformation with processing parameters.""""""\n self.seq_len = seq_len\n self.image_h = image_h\n self.image_w = image_w\n self.image_c = image_c\n\n def random_map(self, element: dict, rng: np.random.Generator) -> Any:\n """"""\n Processes a single raw episode from the data source.\n\n Args:\n element: A dictionary representing one record from the DataSource.\n Expected to contain 'raw_video' (bytes) and 'sequence_length' (int)\n rng: A per-record random number generator provided by the Grain sampler.\n\n Returns:\n A processed video sequence as a NumPy array with shape\n (seq_len, height, width, channels) and dtype float32.\n """"""\n assert isinstance(element, bytes)\n element = pickle.loads(element)\n \n video_shape = (\n element[""sequence_length""],\n self.image_h,\n self.image_w,\n self.image_c,\n )\n episode_tensor = np.frombuffer(element[""raw_video""], dtype=np.uint8)\n episode_tensor = episode_tensor.reshape(video_shape)\n\n current_episode_len = episode_tensor.shape[0]\n if current_episode_len < self.seq_len:\n raise ValueError(f""An episode has length {current_episode_len}, which is ""\n f""shorter than the requested sequence length {self.seq_len}."")\n \n max_start_idx = current_episode_len - self.seq_len\n \n start_idx = rng.integers(0, max_start_idx + 1)\n\n seq = episode_tensor[start_idx : start_idx + self.seq_len]\n\n processed_sequence = seq.astype(np.float32) / 255.0\n\n return processed_sequence\n\n\ndef get_dataloader(\n array_record_paths: list[str],\n seq_len: int,\n global_batch_size: int,\n image_h: int,\n image_w: int,\n image_c: int,\n num_workers: int = 1,\n prefetch_buffer_size: int = 1,\n seed: int = 42,\n):\n """"""\n Creates a data loading pipeline using Grain.\n """"""\n if not array_record_paths:\n raise ValueError(""array_record_paths list cannot be empty."")\n\n num_processes = jax.process_count()\n\n if global_batch_size % num_processes != 0:\n raise ValueError(\n f""Global batch size {global_batch_size} must be divisible by ""\n f""the number of JAX processes {num_processes} for proper sharding.""\n )\n per_process_batch_size = global_batch_size // num_processes\n\n source = grain.sources.ArrayRecordDataSource(array_record_paths)\n \n sampler = grain.samplers.IndexSampler(\n num_records=len(source),\n shard_options=grain.sharding.ShardByJaxProcess(drop_remainder=True),\n # FIXME: check whether the global shuffle is the reason why the dataloader is so slow\n shuffle=True,\n num_epochs=100, # FIXME: is there an equivalent to tf.data.repeat(None)?\n",python,content
102,1000009,"utils/dataloader.py",832,0,"",python,selection_mouse
103,1000041,".gitignore",0,0,"",ignore,tab
104,1000365,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
105,1001816,".gitignore",0,0,"",ignore,tab
106,1018336,".gitignore",0,0,"Switched from branch 'grain-dataloader' to 'main'",ignore,git_branch_checkout
107,1062644,"generate_arrayrecord_dataset.py",0,0,"#!/usr/bin/env python3\n""""""\nGenerate ArrayRecord dataset compatible with train_tokenizer.py\n\nThis script creates synthetic video data and saves it in ArrayRecord format\nthat can be used by the tokenizer training script.\n""""""\n\nimport os\nimport pickle\nimport numpy as np\nimport grain\nfrom array_record.python.array_record_module import ArrayRecordWriter\nimport argparse\nimport multiprocessing as mp\nfrom functools import partial\nimport time\n\n\ndef generate_synthetic_video(\n seq_len: int = 16,\n height: int = 90,\n width: int = 160,\n channels: int = 3\n) -> np.ndarray:\n """"""\n Generate synthetic video data with random frames for training.\n \n Args:\n seq_len: Number of frames in the video sequence\n height: Height of each frame\n width: Width of each frame\n channels: Number of color channels\n \n Returns:\n Video array of shape (seq_len, height, width, channels)\n """"""\n video = np.random.rand(seq_len, height, width, channels).astype(np.float32)\n \n return video\n\n\ndef create_single_arrayrecord_file(\n file_info: tuple,\n output_dir: str,\n seq_len: int,\n height: int,\n width: int,\n channels: int,\n records_per_file: int,\n seed: int\n) -> tuple:\n """"""\n Create a single ArrayRecord file with synthetic video data.\n \n Args:\n file_info: Tuple of (file_idx, start_idx, end_idx)\n output_dir: Directory to save ArrayRecord files\n seq_len: Number of frames per video\n height: Frame height\n width: Frame width\n channels: Number of color channels\n records_per_file: Number of records per ArrayRecord file\n seed: Random seed for reproducibility\n \n Returns:\n Tuple of (file_path, num_videos_created, success)\n """"""\n file_idx, start_idx, end_idx = file_info\n videos_in_file = end_idx - start_idx\n \n # Set seed for this process (add file_idx to make each file different)\n np.random.seed(seed + file_idx)\n \n file_path = os.path.join(output_dir, f""videos_{file_idx:04d}.array_record"")\n \n try:\n writer = ArrayRecordWriter(file_path, ""group_size:1"")\n \n for video_idx in range(videos_in_file):\n video = generate_synthetic_video(seq_len, height, width, channels)\n \n # Convert to uint8 format as expected by the dataloader\n video_uint8 = (video * 255).astype(np.uint8)\n \n # Create record in the format expected by ProcessEpisodeAndSlice\n record = {\n ""raw_video"": video_uint8.tobytes(),\n ""sequence_length"": seq_len\n }\n \n writer.write(pickle.dumps(record))\n \n writer.close()\n return (file_path, videos_in_file, True)\n \n except Exception as e:\n print(f""Error creating file {file_path}: {e}"")\n return (file_path, 0, False)\n\n\ndef create_arrayrecord_dataset(\n output_dir: str,\n num_videos: int = 1000,\n seq_len: int = 16,\n height: int = 90,\n width: int = 160,\n channels: int = 3,\n records_per_file: int = 100,\n seed: int = 42,\n num_processes: int | None = None\n):\n """"""\n Create ArrayRecord dataset with synthetic video data using multiprocessing.\n \n Args:\n output_dir: Directory to save ArrayRecord files\n num_videos: Total number of videos to generate\n seq_len: Number of frames per video\n height: Frame height\n width: Frame width\n channels: Number of color channels\n records_per_file: Number of records per ArrayRecord file\n seed: Random seed for reproducibility\n num_processes: Number of processes to use (None for auto-detect)\n """"""\n os.makedirs(output_dir, exist_ok=True)\n \n num_files = (num_videos + records_per_file - 1) // records_per_file\n \n print(f""Generating {num_videos} 
videos across {num_files} ArrayRecord files..."")\n print(f""Each file will contain up to {records_per_file} videos"")\n print(f""Video dimensions: {seq_len} frames × {height}×{width}×{channels}"")\n \n # Prepare file information for each worker\n file_infos = []\n for file_idx in range(num_files):\n start_idx = file_idx * records_per_file\n end_idx = min((file_idx + 1) * records_per_file, num_videos)\n file_infos.append((file_idx, start_idx, end_idx))\n \n # Set number of processes (use CPU count if not specified)\n if num_processes is None:\n num_processes = min(mp.cpu_count(), num_files)\n \n print(f""Using {num_processes} processes for parallel generation..."")\n \n start_time = time.time()\n \n # Create partial function with fixed arguments\n worker_func = partial(\n create_single_arrayrecord_file,\n output_dir=output_dir,\n seq_len=seq_len,\n height=height,\n width=width,\n channels=channels,\n records_per_file=records_per_file,\n seed=seed\n )\n \n # Use multiprocessing to create files in parallel\n with mp.Pool(processes=num_processes) as pool:\n results = pool.map(worker_func, file_infos)\n \n end_time = time.time()\n \n # Process results\n total_records = 0\n successful_files = 0\n \n for file_path, num_videos_created, success in results:\n if success:\n print(f""✓ Created {file_path} with {num_videos_created} videos"")\n total_records += num_videos_created\n successful_files += 1\n else:\n print(f""✗ Failed to create {file_path}"")\n \n print(f""\nDataset generation complete!"")\n print(f""Total videos generated: {total_records}"")\n print(f""Successful files: {successful_files}/{num_files}"")\n print(f""Files created in: {output_dir}"")\n print(f""Generation time: {end_time - start_time:.2f} seconds"")\n\n\ndef verify_dataset(output_dir: str, num_samples: int = 5):\n """"""\n Verify the generated dataset using Grain's ArrayRecordDataSource.\n \n Args:\n output_dir: Directory containing ArrayRecord files\n num_samples: Number of samples to verify\n """"""\n print(f""\nVerifying dataset in {output_dir}..."")\n \n # Find all ArrayRecord files\n array_record_files = [\n os.path.join(output_dir, f) \n for f in os.listdir(output_dir) \n if f.endswith('.array_record')\n ]\n \n if not array_record_files:\n print(""No ArrayRecord files found!"")\n return\n \n print(f""Found {len(array_record_files)} ArrayRecord files"")\n \n # Use Grain's ArrayRecordDataSource as shown in the documentation\n try:\n data_source = grain.sources.ArrayRecordDataSource(array_record_files[0])\n print(f""Number of records in first file: {len(data_source)}"")\n \n # Load and verify a few samples\n for i in range(min(num_samples, len(data_source))):\n record_bytes = data_source[i]\n record = pickle.loads(record_bytes)\n \n # Reconstruct video from raw_video bytes\n video_shape = (record[""sequence_length""], 90, 160, 3) # Hardcoded for now\n video = np.frombuffer(record[""raw_video""], dtype=np.uint8).reshape(video_shape)\n \n print(f"" Record {i}: video shape = {video.shape}, dtype = {video.dtype}"")\n print(f"" Value range: [{video.min()}, {video.max()}]"")\n print(f"" Mean: {video.mean():.1f}"")\n print(f"" Sequence length: {record['sequence_length']}"")\n \n except Exception as e:\n print(f""Error reading ArrayRecord file: {e}"")\n print(""This might indicate a file format issue."")\n\n\ndef main():\n parser = argparse.ArgumentParser(description=""Generate ArrayRecord dataset for tokenizer training"")\n parser.add_argument(""--output_dir"", type=str, default=""data_arrayrecord/dummy"",\n help=""Output 
directory for ArrayRecord files"")\n parser.add_argument(""--num_videos"", type=int, default=1000,\n help=""Total number of videos to generate"")\n parser.add_argument(""--seq_len"", type=int, default=16,\n help=""Number of frames per video"")\n parser.add_argument(""--height"", type=int, default=90,\n help=""Frame height"")\n parser.add_argument(""--width"", type=int, default=160,\n help=""Frame width"")\n parser.add_argument(""--channels"", type=int, default=3,\n help=""Number of color channels"")\n parser.add_argument(""--records_per_file"", type=int, default=100,\n help=""Number of records per ArrayRecord file"")\n parser.add_argument(""--seed"", type=int, default=42,\n help=""Random seed for reproducibility"")\n parser.add_argument(""--num_processes"", type=int, default=None,\n help=""Number of processes to use (default: auto-detect)"")\n parser.add_argument(""--verify"", action=""store_true"",\n help=""Verify the generated dataset"")\n \n args = parser.parse_args()\n \n # Generate the dataset\n create_arrayrecord_dataset(\n output_dir=args.output_dir,\n num_videos=args.num_videos,\n seq_len=args.seq_len,\n height=args.height,\n width=args.width,\n channels=args.channels,\n records_per_file=args.records_per_file,\n seed=args.seed,\n num_processes=args.num_processes\n )\n \n # Verify if requested\n if args.verify:\n verify_dataset(args.output_dir)\n \nif __name__ == ""__main__"":\n main()\n",python,tab
108,1096486,"experiments/tokenizer_cross_node_checkpointing_test.sh",0,0,"",shellscript,tab
109,1099987,"experiments/tokenizer_time_measurement_grain.sh",0,0,"#!/usr/bin/env bash\nsource .venv/bin/activate\n\n\ndata_dir='data_arrayrecord/dummy'\n\n# FIXME: the `time_measurement_interval` line is only for debugging and should be removed\nsrun python train_tokenizer.py \\n --time_measurement_interval 10 \\n --batch_size 48 \\n --num_steps 300000 \\n --warmup_steps 10000 \\n --seed 0 \\n --min_lr=0.0000866 \\n --max_lr=0.0000866 \\n --data_dir $data_dir",shellscript,tab
110,1100040,"experiments/tokenizer_time_measurement_grain.sh",0,406,"#!/usr/bin/env bash\nsource .venv/bin/activate\n\n\ndata_dir='data_arrayrecord/dummy'\n\n# FIXME: the `time_measurement_interval` line is only for debugging and should be removed\nsrun python train_tokenizer.py \\n --time_measurement_interval 10 \\n --batch_size 48 \\n --num_steps 300000 \\n --warmup_steps 10000 \\n --seed 0 \\n --min_lr=0.0000866 \\n --max_lr=0.0000866 \\n --data_dir $data_dir",shellscript,selection_command
111,1100205,"experiments/tokenizer_time_measurement_grain.sh",405,0,"",shellscript,selection_command
112,1100851,"experiments/tokenizer_cross_node_checkpointing_test.sh",0,0,"",shellscript,tab
113,1101359,"experiments/tokenizer_cross_node_checkpointing_test.sh",0,0,"#!/usr/bin/env bash\nsource .venv/bin/activate\n\n\ndata_dir='data_arrayrecord/dummy'\n\n# FIXME: the `time_measurement_interval` line is only for debugging and should be removed\nsrun python train_tokenizer.py \\n --time_measurement_interval 10 \\n --batch_size 48 \\n --num_steps 300000 \\n --warmup_steps 10000 \\n --seed 0 \\n --min_lr=0.0000866 \\n --max_lr=0.0000866 \\n --data_dir $data_dir",shellscript,content
114,1101556,"experiments/tokenizer_cross_node_checkpointing_test.sh",405,0,"",shellscript,selection_command
115,1101889,"experiments/tokenizer_cross_node_checkpointing_test.sh",380,0,"",shellscript,selection_command
116,1102080,"experiments/tokenizer_cross_node_checkpointing_test.sh",355,0,"",shellscript,selection_command
117,1102082,"experiments/tokenizer_cross_node_checkpointing_test.sh",330,0,"",shellscript,selection_command
118,1102118,"experiments/tokenizer_cross_node_checkpointing_test.sh",313,0,"",shellscript,selection_command
119,1102145,"experiments/tokenizer_cross_node_checkpointing_test.sh",288,0,"",shellscript,selection_command
120,1102179,"experiments/tokenizer_cross_node_checkpointing_test.sh",263,0,"",shellscript,selection_command
121,1102212,"experiments/tokenizer_cross_node_checkpointing_test.sh",229,0,"",shellscript,selection_command
122,1102245,"experiments/tokenizer_cross_node_checkpointing_test.sh",196,0,"",shellscript,selection_command
123,1102279,"experiments/tokenizer_cross_node_checkpointing_test.sh",106,0,"",shellscript,selection_command
124,1102316,"experiments/tokenizer_cross_node_checkpointing_test.sh",82,0,"",shellscript,selection_command
125,1102447,"experiments/tokenizer_cross_node_checkpointing_test.sh",71,0,"",shellscript,selection_command
126,1102634,"experiments/tokenizer_cross_node_checkpointing_test.sh",47,0,"",shellscript,selection_command
127,1102791,"experiments/tokenizer_cross_node_checkpointing_test.sh",71,0,"",shellscript,selection_command
128,1110387,"experiments/tokenizer_cross_node_checkpointing_test.sh",58,0,"",shellscript,selection_command
129,1110528,"experiments/tokenizer_cross_node_checkpointing_test.sh",56,0,"",shellscript,selection_command
130,1110801,"experiments/tokenizer_cross_node_checkpointing_test.sh",58,0,"",shellscript,selection_command
131,1111074,"experiments/tokenizer_cross_node_checkpointing_test.sh",58,1,"d",shellscript,selection_command
132,1111133,"experiments/tokenizer_cross_node_checkpointing_test.sh",58,16,"data_arrayrecord",shellscript,selection_command
133,1111319,"experiments/tokenizer_cross_node_checkpointing_test.sh",58,17,"data_arrayrecord/",shellscript,selection_command
134,1111463,"experiments/tokenizer_cross_node_checkpointing_test.sh",58,22,"data_arrayrecord/dummy",shellscript,selection_command
135,1117872,"experiments/tokenizer_cross_node_checkpointing_test.sh",82,0,"",shellscript,selection_mouse
136,1118489,"experiments/tokenizer_cross_node_checkpointing_test.sh",48,0,"",shellscript,selection_command
137,1118660,"experiments/tokenizer_cross_node_checkpointing_test.sh",56,0,"",shellscript,selection_command
138,1118777,"experiments/tokenizer_cross_node_checkpointing_test.sh",58,0,"",shellscript,selection_command
139,1118919,"experiments/tokenizer_cross_node_checkpointing_test.sh",74,0,"",shellscript,selection_command
140,1119567,"experiments/tokenizer_cross_node_checkpointing_test.sh",58,22,"",shellscript,content
141,1120989,"experiments/tokenizer_cross_node_checkpointing_test.sh",58,0,"d",shellscript,content
142,1120990,"experiments/tokenizer_cross_node_checkpointing_test.sh",59,0,"",shellscript,selection_keyboard
143,1121070,"experiments/tokenizer_cross_node_checkpointing_test.sh",59,0,"a",shellscript,content
144,1121071,"experiments/tokenizer_cross_node_checkpointing_test.sh",60,0,"",shellscript,selection_keyboard
145,1121134,"experiments/tokenizer_cross_node_checkpointing_test.sh",60,0,"t",shellscript,content
146,1121135,"experiments/tokenizer_cross_node_checkpointing_test.sh",61,0,"",shellscript,selection_keyboard
147,1121282,"experiments/tokenizer_cross_node_checkpointing_test.sh",61,0,"a",shellscript,content
148,1121283,"experiments/tokenizer_cross_node_checkpointing_test.sh",62,0,"",shellscript,selection_keyboard
149,1121512,"experiments/tokenizer_cross_node_checkpointing_test.sh",62,0,"_",shellscript,content
150,1121513,"experiments/tokenizer_cross_node_checkpointing_test.sh",63,0,"",shellscript,selection_keyboard
151,1121694,"experiments/tokenizer_cross_node_checkpointing_test.sh",63,0,"t",shellscript,content
152,1121695,"experiments/tokenizer_cross_node_checkpointing_test.sh",64,0,"",shellscript,selection_keyboard
153,1121840,"experiments/tokenizer_cross_node_checkpointing_test.sh",64,0,"f",shellscript,content
154,1121840,"experiments/tokenizer_cross_node_checkpointing_test.sh",65,0,"",shellscript,selection_keyboard
155,1122344,"experiments/tokenizer_cross_node_checkpointing_test.sh",65,0,"r",shellscript,content
156,1122345,"experiments/tokenizer_cross_node_checkpointing_test.sh",66,0,"",shellscript,selection_keyboard
157,1122437,"experiments/tokenizer_cross_node_checkpointing_test.sh",66,0,"e",shellscript,content
158,1122438,"experiments/tokenizer_cross_node_checkpointing_test.sh",67,0,"",shellscript,selection_keyboard
159,1122606,"experiments/tokenizer_cross_node_checkpointing_test.sh",67,0,"c",shellscript,content
160,1122607,"experiments/tokenizer_cross_node_checkpointing_test.sh",68,0,"",shellscript,selection_keyboard
161,1122749,"experiments/tokenizer_cross_node_checkpointing_test.sh",68,0,"o",shellscript,content
162,1122750,"experiments/tokenizer_cross_node_checkpointing_test.sh",69,0,"",shellscript,selection_keyboard
163,1122855,"experiments/tokenizer_cross_node_checkpointing_test.sh",69,0,"r",shellscript,content
164,1122856,"experiments/tokenizer_cross_node_checkpointing_test.sh",70,0,"",shellscript,selection_keyboard
165,1122892,"experiments/tokenizer_cross_node_checkpointing_test.sh",70,0,"d",shellscript,content
166,1122892,"experiments/tokenizer_cross_node_checkpointing_test.sh",71,0,"",shellscript,selection_keyboard
167,1122950,"experiments/tokenizer_cross_node_checkpointing_test.sh",71,0,"s",shellscript,content
168,1122951,"experiments/tokenizer_cross_node_checkpointing_test.sh",72,0,"",shellscript,selection_keyboard
169,1123161,"experiments/tokenizer_cross_node_checkpointing_test.sh",72,0,"/",shellscript,content
170,1123161,"experiments/tokenizer_cross_node_checkpointing_test.sh",73,0,"",shellscript,selection_keyboard
171,1123264,"experiments/tokenizer_cross_node_checkpointing_test.sh",72,0,"",shellscript,selection_command
172,1124458,"experiments/tokenizer_cross_node_checkpointing_test.sh",72,1,"",shellscript,content
173,1126526,"experiments/tokenizer_cross_node_checkpointing_test.sh",72,0,"/",shellscript,content
174,1126529,"experiments/tokenizer_cross_node_checkpointing_test.sh",73,0,"",shellscript,selection_keyboard
175,1126530,"experiments/tokenizer_cross_node_checkpointing_test.sh",72,0,"",shellscript,selection_command
176,1127573,"experiments/tokenizer_cross_node_checkpointing_test.sh",72,1,"",shellscript,content
177,1128315,"experiments/tokenizer_cross_node_checkpointing_test.sh",74,0,"",shellscript,selection_command
178,1128405,"experiments/tokenizer_cross_node_checkpointing_test.sh",99,0,"",shellscript,selection_command
179,1131243,"experiments/tokenizer_cross_node_checkpointing_test.sh",75,90,"",shellscript,content
180,1131585,"experiments/tokenizer_cross_node_checkpointing_test.sh",108,0,"",shellscript,selection_command
181,1131863,"experiments/tokenizer_cross_node_checkpointing_test.sh",108,37,"",shellscript,content
182,1131871,"experiments/tokenizer_cross_node_checkpointing_test.sh",112,0,"",shellscript,selection_command
183,1132063,"experiments/tokenizer_cross_node_checkpointing_test.sh",79,0,"",shellscript,selection_command
184,1132391,"experiments/tokenizer_cross_node_checkpointing_test.sh",80,0,"",shellscript,selection_command
185,1132613,"experiments/tokenizer_cross_node_checkpointing_test.sh",113,0,"",shellscript,selection_command
186,1132906,"experiments/tokenizer_cross_node_checkpointing_test.sh",135,0,"",shellscript,selection_command
187,1133661,"experiments/tokenizer_cross_node_checkpointing_test.sh",160,0,"",shellscript,selection_command
188,1133921,"experiments/tokenizer_cross_node_checkpointing_test.sh",187,0,"",shellscript,selection_command
189,1133936,"experiments/tokenizer_cross_node_checkpointing_test.sh",202,0,"",shellscript,selection_command
190,1133969,"experiments/tokenizer_cross_node_checkpointing_test.sh",227,0,"",shellscript,selection_command
191,1134004,"experiments/tokenizer_cross_node_checkpointing_test.sh",252,0,"",shellscript,selection_command
192,1134595,"experiments/tokenizer_cross_node_checkpointing_test.sh",227,0,"",shellscript,selection_command
193,1134853,"experiments/tokenizer_cross_node_checkpointing_test.sh",202,0,"",shellscript,selection_command
194,1134871,"experiments/tokenizer_cross_node_checkpointing_test.sh",187,0,"",shellscript,selection_command
195,1134979,"experiments/tokenizer_cross_node_checkpointing_test.sh",160,0,"",shellscript,selection_command
196,1134981,"experiments/tokenizer_cross_node_checkpointing_test.sh",135,0,"",shellscript,selection_command
197,1135046,"experiments/tokenizer_cross_node_checkpointing_test.sh",113,0,"",shellscript,selection_command
198,1136383,"experiments/tokenizer_cross_node_checkpointing_test.sh",113,0,"",shellscript,selection_command
199,1136520,"experiments/tokenizer_cross_node_checkpointing_test.sh",114,0,"",shellscript,selection_command
200,1136711,"experiments/tokenizer_cross_node_checkpointing_test.sh",125,0,"",shellscript,selection_command
201,1136907,"experiments/tokenizer_cross_node_checkpointing_test.sh",128,0,"",shellscript,selection_command
202,1137641,"experiments/tokenizer_cross_node_checkpointing_test.sh",125,0,"",shellscript,selection_command
203,1137791,"experiments/tokenizer_cross_node_checkpointing_test.sh",114,0,"",shellscript,selection_command
204,1138172,"experiments/tokenizer_cross_node_checkpointing_test.sh",125,0,"",shellscript,selection_command
205,1143530,"experiments/tokenizer_cross_node_checkpointing_test.sh",125,2,"",shellscript,content
206,1143727,"experiments/tokenizer_cross_node_checkpointing_test.sh",125,0,"2",shellscript,content
207,1143727,"experiments/tokenizer_cross_node_checkpointing_test.sh",126,0,"",shellscript,selection_keyboard
208,1143784,"experiments/tokenizer_cross_node_checkpointing_test.sh",126,0,"4",shellscript,content
209,1143785,"experiments/tokenizer_cross_node_checkpointing_test.sh",127,0,"",shellscript,selection_keyboard
210,1144074,"experiments/tokenizer_cross_node_checkpointing_test.sh",126,0,"",shellscript,selection_command
211,1147732,"train_tokenizer.py",0,0,"from dataclasses import dataclass, field\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\nfrom utils.parameter_utils import count_parameters_by_component\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n checkpoint: str = """"\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 3e-4\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_tokenizer""\n tags: list[str] = field(default_factory=lambda: [""tokenizer""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\[email protected]\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if 
num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb.init(\n entity=args.entity,\n project=args.project,\n name=args.name,\n tags=args.tags,\n group=""debug"",\n config=args,\n )\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer_<timestamp>_<step>\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n seed=args.seed,\n )\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in dataloader) # type: ignore\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout = jax.random.split(rng, 3)\n\n inputs = dict(videos=videos, rng=_rng, dropout_rng=_rng_dropout)\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and 
jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""tokenizer_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab
212,1148136,"train_tokenizer.py",0,0,"",python,selection_command
213,1149197,"train_tokenizer.py",321,0,"",python,selection_command
214,1149921,"train_tokenizer.py",345,0,"",python,selection_command
215,1150231,"train_tokenizer.py",876,0,"",python,selection_command
216,1150404,"train_tokenizer.py",1543,0,"",python,selection_command
217,1150890,"train_tokenizer.py",876,0,"",python,selection_command
218,1151918,"train_tokenizer.py",1543,0,"",python,selection_command
219,1153375,"train_tokenizer.py",1520,0,"",python,selection_command
220,1153938,"train_tokenizer.py",1516,0,"",python,selection_command
221,1154015,"train_tokenizer.py",1516,1,"c",python,selection_command
222,1154122,"train_tokenizer.py",1516,8,"ckpt_dir",python,selection_command
223,1154212,"train_tokenizer.py",1516,0,"",python,selection_command
224,1156641,"experiments/tokenizer_cross_node_checkpointing_test.sh",0,0,"",shellscript,tab
225,1157702,"experiments/tokenizer_cross_node_checkpointing_test.sh",129,0,"\n ",shellscript,content
226,1158257,"experiments/tokenizer_cross_node_checkpointing_test.sh",130,4,"",shellscript,content
227,1158405,"experiments/tokenizer_cross_node_checkpointing_test.sh",130,0,"ckpt_dir",shellscript,content
228,1158408,"experiments/tokenizer_cross_node_checkpointing_test.sh",137,0,"",shellscript,selection_command
229,1158930,"experiments/tokenizer_cross_node_checkpointing_test.sh",130,0,"",shellscript,selection_command
230,1159003,"experiments/tokenizer_cross_node_checkpointing_test.sh",138,0," checkpoints/tokenizer_cross_node_checkpointing_test \",shellscript,content
231,1159003,"experiments/tokenizer_cross_node_checkpointing_test.sh",130,0," --",shellscript,content
232,1159005,"experiments/tokenizer_cross_node_checkpointing_test.sh",198,0,"",shellscript,selection_command
233,1159837,"experiments/tokenizer_cross_node_checkpointing_test.sh",197,0,"",shellscript,selection_command
234,1160031,"experiments/tokenizer_cross_node_checkpointing_test.sh",157,0,"",shellscript,selection_command
235,1160307,"experiments/tokenizer_cross_node_checkpointing_test.sh",156,0,"",shellscript,selection_command
236,1160315,"experiments/tokenizer_cross_node_checkpointing_test.sh",145,0,"",shellscript,selection_command
237,1160669,"experiments/tokenizer_cross_node_checkpointing_test.sh",156,0,"",shellscript,selection_command
238,1160832,"experiments/tokenizer_cross_node_checkpointing_test.sh",157,0,"",shellscript,selection_command
239,1161032,"experiments/tokenizer_cross_node_checkpointing_test.sh",197,0,"",shellscript,selection_command
240,1161156,"experiments/tokenizer_cross_node_checkpointing_test.sh",203,0,"",shellscript,selection_command
241,1161551,"experiments/tokenizer_cross_node_checkpointing_test.sh",197,0,"",shellscript,selection_command
242,1175160,"experiments/tokenizer_cross_node_checkpointing_test.sh",166,0,"",shellscript,selection_mouse
243,1176327,"experiments/tokenizer_cross_node_checkpointing_test.sh",157,0,"",shellscript,selection_command
244,1176510,"experiments/tokenizer_cross_node_checkpointing_test.sh",197,0,"",shellscript,selection_command
245,1176691,"experiments/tokenizer_cross_node_checkpointing_test.sh",203,0,"",shellscript,selection_command
246,1178426,"experiments/tokenizer_cross_node_checkpointing_test.sh",134,0,"",shellscript,selection_command
247,1178747,"experiments/tokenizer_cross_node_checkpointing_test.sh",198,0,"\n ",shellscript,content
248,1179447,"experiments/tokenizer_cross_node_checkpointing_test.sh",203,0,"-",shellscript,content
249,1179448,"experiments/tokenizer_cross_node_checkpointing_test.sh",204,0,"",shellscript,selection_keyboard
250,1179582,"experiments/tokenizer_cross_node_checkpointing_test.sh",204,0,"-",shellscript,content
251,1179583,"experiments/tokenizer_cross_node_checkpointing_test.sh",205,0,"",shellscript,selection_keyboard
252,1179962,"experiments/tokenizer_cross_node_checkpointing_test.sh",205,0,"log_checkpoint_interval 10000 \",shellscript,content
253,1180203,"experiments/tokenizer_cross_node_checkpointing_test.sh",235,0,"",shellscript,selection_command
254,1180737,"train_tokenizer.py",0,0,"",python,tab
255,1182128,"train_tokenizer.py",1539,0,"",python,selection_command
256,1182682,"experiments/tokenizer_cross_node_checkpointing_test.sh",0,0,"",shellscript,tab
257,1183280,"experiments/tokenizer_cross_node_checkpointing_test.sh",229,0,"",shellscript,selection_command
258,1184039,"experiments/tokenizer_cross_node_checkpointing_test.sh",229,5,"",shellscript,content
259,1184259,"experiments/tokenizer_cross_node_checkpointing_test.sh",229,0,"1",shellscript,content
260,1184260,"experiments/tokenizer_cross_node_checkpointing_test.sh",230,0,"",shellscript,selection_keyboard
261,1184351,"experiments/tokenizer_cross_node_checkpointing_test.sh",230,0,"0",shellscript,content
262,1184351,"experiments/tokenizer_cross_node_checkpointing_test.sh",231,0,"",shellscript,selection_keyboard
263,1184456,"experiments/tokenizer_cross_node_checkpointing_test.sh",230,0,"",shellscript,selection_command
264,1185468,"experiments/tokenizer_cross_node_checkpointing_test.sh",199,0,"",shellscript,selection_command
265,1194648,"TERMINAL",0,0,"b",,terminal_output
266,1194778,"TERMINAL",0,0,"as",,terminal_output
267,1194884,"TERMINAL",0,0,"h",,terminal_output
268,1195014,"TERMINAL",0,0," e",,terminal_output
269,1195229,"TERMINAL",0,0,"xp",,terminal_output
270,1195414,"TERMINAL",0,0,"er",,terminal_output
271,1195480,"TERMINAL",0,0,"i",,terminal_output
272,1195561,"TERMINAL",0,0,"m",,terminal_output
273,1195704,"TERMINAL",0,0,"ents/",,terminal_output
274,1195979,"TERMINAL",0,0,"tok",,terminal_output
275,1196085,"TERMINAL",0,0,"enizer_",,terminal_output
276,1197093,"TERMINAL",0,0,"c",,terminal_output
277,1197185,"TERMINAL",0,0,"h",,terminal_output
278,1197356,"TERMINAL",0,0,"",,terminal_output
279,1198951,"TERMINAL",0,0,"[K",,terminal_output
280,1199518,"TERMINAL",0,0,"r",,terminal_output
281,1249444,"TERMINAL",0,0,"oss_node_checkpointing_test.sh ",,terminal_output
282,2381813,"experiments/tokenizer_cross_node_checkpointing_test.sh",0,0,"Switched from branch 'main' to 'fix-distributed-checkpointing'",shellscript,git_branch_checkout
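The rows above follow the crowd-code event schema (Sequence, Time, File, RangeOffset, RangeLength, Text, Language, Type): `content` rows splice Text into a file after removing RangeLength characters at RangeOffset, `tab` rows carrying a full file body act as open-file snapshots, and `selection_*`, `terminal_output`, and `git_branch_checkout` rows record navigation and environment events. As a minimal sketch, assuming those inferred column semantics and that `\n` inside the Text field is an escaped newline, one file's final text can be reconstructed by replaying a recording:

import csv

def replay(csv_path: str, target_file: str) -> str:
    # Rebuild one file's text from a crowd-code source.csv.
    # Assumptions (inferred from the rows above, not a documented API):
    # - a 'tab' row with non-empty Text is a full-file snapshot
    # - a 'content' row replaces buf[RangeOffset:RangeOffset+RangeLength] with Text
    # - all other event types leave the buffer untouched
    buf = ""
    with open(csv_path, newline="") as f:
        for row in csv.DictReader(f):
            if row["File"] != target_file:
                continue
            if row["Type"] == "tab" and row["Text"]:
                buf = row["Text"].replace("\\n", "\n")  # snapshot on open
            elif row["Type"] == "content":
                off, length = int(row["RangeOffset"]), int(row["RangeLength"])
                text = row["Text"].replace("\\n", "\n")
                buf = buf[:off] + text + buf[off + length:]
    return buf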
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-823236e5-d9b8-4c96-ab4a-24b8972648001754120306348-2025_08_02-09.38.34.668/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-847ad052-cbff-45db-a343-a2c6b2d212411753383716687-2025_07_24-21.02.30.722/source.csv
ADDED
@@ -0,0 +1,24 @@
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
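This header row names the per-event columns used throughout these recordings. For orientation, a hedged sketch that summarizes one recording; the path is a placeholder for any of the crowd-code-*/source.csv files listed above, and the millisecond interpretation of Time is an assumption based on the magnitudes seen in the rows:

import pandas as pd

df = pd.read_csv("source.csv")  # placeholder path: substitute a real recording
print(df["Type"].value_counts())   # mix of selection/content/tab/terminal events
print(df.groupby("File").size())   # how many events touched each file
print(df["Time"].max() - df["Time"].min(), "ms recorded (assumed unit)")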
1,3,"train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\n\nimport einops\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\nimport flax.nnx as nnx\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n warmup_steps: int = 5000\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n # Tokenizer\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(model, inputs):\n """"""Compute masked dynamics loss""""""\n inputs[""videos""] = inputs[""videos""].astype(args.dtype) / 255.0\n model.train()\n outputs = model(inputs, training=True)\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean() # type: ignore\n ssim = pix.ssim(gt, recon).mean() # type: ignore\n _, index_counts_lam = jnp.unique_counts(\n jnp.ravel(outputs[""lam_indices""]), size=args.num_latent_actions, fill_value=0\n )\n _, index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]), size=args.num_patch_latents, fill_value=0\n )\n codebook_usage_lam = (index_counts_lam != 
0).mean()\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\[email protected]\ndef train_step(model, optimizer, inputs):\n """"""Update state and compute metrics""""""\n\n def loss_fn(model):\n return dynamics_loss_fn(model, inputs)\n\n (loss, (recon, metrics)), grads = nnx.value_and_grad(loss_fn, has_aux=True)(model)\n optimizer.update(grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n rng, _rng = jax.random.split(rng)\n rngs = nnx.Rngs(_rng)\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs,\n )\n\n _, params, _ = nnx.split(genie, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n optimizer = nnx.Optimizer(genie, tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr 
= create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n\n model_state = nnx.state(optimizer.model)\n model_sharded_state = jax.device_put(model_state, replicated_sharding)\n nnx.update(optimizer.model, model_sharded_state)\n optimizer_state = nnx.state(optimizer, nnx.optimizer.OptState)\n optimizer_sharded_state = jax.device_put(optimizer_state, replicated_sharding)\n nnx.update(optimizer, optimizer_sharded_state)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointSave, grain.checkpoint.CheckpointHandler) # type: ignore\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler) # type: ignore\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n image_shape = (args.image_height, args.image_width, args.image_channels)\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state),\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator),\n ),\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n optimizer = restore_genie_components(\n optimizer, replicated_sharding, rng, args\n )\n\n # --- TRAIN LOOP ---\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in grain_iterator) # type: ignore\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, mask_rng=_rng_mask)\n loss, recon, metrics = train_step(genie, optimizer, inputs)\n 
metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n optimizer_state = nnx.state(optimizer)\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state),\n dataloader_state=grain.checkpoint.CheckpointSave(\n grain_iterator\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab
2,818,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"9:02:30 PM [info] Activating crowd-code\n9:02:30 PM [info] Recording started\n9:02:30 PM [info] Initializing git provider using file system watchers...\n",Log,tab
3,1049,"train_dynamics.py",0,0,"",python,tab
4,1061,"TERMINAL",0,0,"bash",,terminal_focus
5,61246,"genie.py",0,0,"from typing import Dict, Any\n\nimport optax\nimport jax\nimport jax.numpy as jnp\nimport flax.nnx as nnx\nfrom flax.training.train_state import TrainState\nimport orbax.checkpoint as ocp\n\nfrom models.dynamics import DynamicsMaskGIT\nfrom models.lam import LatentActionModel\nfrom models.tokenizer import TokenizerVQVAE\n\nimport grain\n\n\nclass Genie(nnx.Module):\n """"""Genie model""""""\n\n def __init__(\n self,\n in_dim: int,\n tokenizer_dim: int,\n tokenizer_ffn_dim: int,\n latent_patch_dim: int,\n num_patch_latents: int,\n patch_size: int,\n tokenizer_num_blocks: int,\n tokenizer_num_heads: int,\n lam_dim: int,\n lam_ffn_dim: int,\n latent_action_dim: int,\n num_latent_actions: int,\n lam_patch_size: int,\n lam_num_blocks: int,\n lam_num_heads: int,\n lam_co_train: bool,\n dyna_dim: int,\n dyna_ffn_dim: int,\n dyna_num_blocks: int,\n dyna_num_heads: int,\n param_dtype: jnp.dtype,\n dtype: jnp.dtype,\n use_flash_attention: bool,\n rngs: nnx.Rngs,\n dropout: float = 0.0,\n mask_limit: float = 0.0,\n ):\n # --- Tokenizer ---\n self.in_dim = in_dim\n self.tokenizer_dim = tokenizer_dim\n self.tokenizer_ffn_dim = tokenizer_ffn_dim\n self.latent_patch_dim = latent_patch_dim\n self.num_patch_latents = num_patch_latents\n self.patch_size = patch_size\n self.tokenizer_num_blocks = tokenizer_num_blocks\n self.tokenizer_num_heads = tokenizer_num_heads\n # --- LAM ---\n self.lam_dim = lam_dim\n self.lam_ffn_dim = lam_ffn_dim\n self.latent_action_dim = latent_action_dim\n self.num_latent_actions = num_latent_actions\n self.lam_patch_size = lam_patch_size\n self.lam_num_blocks = lam_num_blocks\n self.lam_num_heads = lam_num_heads\n self.lam_co_train = lam_co_train\n # --- Dynamics ---\n self.dyna_dim = dyna_dim\n self.dyna_ffn_dim = dyna_ffn_dim\n self.dyna_num_blocks = dyna_num_blocks\n self.dyna_num_heads = dyna_num_heads\n self.param_dtype = param_dtype\n self.dtype = dtype\n self.use_flash_attention = use_flash_attention\n self.dropout = dropout\n self.mask_limit = mask_limit\n\n self.tokenizer = TokenizerVQVAE(\n in_dim=self.in_dim,\n model_dim=self.tokenizer_dim,\n ffn_dim=self.tokenizer_ffn_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_patch_latents,\n patch_size=self.patch_size,\n num_blocks=self.tokenizer_num_blocks,\n num_heads=self.tokenizer_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n self.lam = LatentActionModel(\n in_dim=self.in_dim,\n model_dim=self.lam_dim,\n ffn_dim=self.lam_ffn_dim,\n latent_dim=self.latent_patch_dim,\n num_latents=self.num_latent_actions,\n patch_size=self.lam_patch_size,\n num_blocks=self.lam_num_blocks,\n num_heads=self.lam_num_heads,\n dropout=0.0,\n codebook_dropout=0.0,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n self.dynamics = DynamicsMaskGIT(\n model_dim=self.dyna_dim,\n ffn_dim=self.dyna_ffn_dim,\n num_latents=self.num_patch_latents,\n latent_action_dim=self.latent_action_dim,\n num_blocks=self.dyna_num_blocks,\n num_heads=self.dyna_num_heads,\n dropout=self.dropout,\n mask_limit=self.mask_limit,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n use_flash_attention=self.use_flash_attention,\n rngs=rngs,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n tokenizer_outputs = self.tokenizer.vq_encode(batch[""videos""], training=False)\n lam_outputs = 
self.lam.vq_encode(batch[""videos""], training=False)\n latent_actions = jax.lax.cond(\n self.lam_co_train,\n lambda: lam_outputs[""z_q""],\n lambda: jax.lax.stop_gradient(lam_outputs[""z_q""]),\n )\n outputs = dict(\n video_tokens=jax.lax.stop_gradient(tokenizer_outputs[""indices""]),\n latent_actions=latent_actions,\n )\n outputs[""mask_rng""] = batch[""mask_rng""]\n dyna_outputs = self.dynamics(outputs, training)\n outputs.update(dyna_outputs)\n mle_indices = jnp.argmax(outputs[""token_logits""], axis=-1)\n outputs[""recon""] = self.tokenizer.decode(\n mle_indices, batch[""videos""].shape[2:4]\n )\n outputs[""lam_indices""] = lam_outputs[""indices""]\n return outputs\n\n def sample(\n self,\n batch: Dict[str, Any],\n seq_len: int,\n steps: int = 25,\n temperature: float = 1,\n sample_argmax: bool = False,\n ) -> Any:\n """"""\n Autoregressively samples up to `seq_len` future frames, following Figure 8 of the paper.\n\n - Input frames are tokenized once.\n - Future frames are generated autoregressively in token space.\n - All frames are detokenized in a single pass.\n\n Note:\n - For interactive or step-wise sampling, detokenization should occur after each action.\n - To maintain consistent tensor shapes across timesteps, all current and future frames are decoded at every step.\n - Temporal causal structure is preserved by\n a) reapplying the mask before each decoding step.\n b) a temporal causal mask is applied within each ST-transformer block.\n\n Dimension keys:\n B: batch size\n T: number of input (conditioning) frames\n N: patches per frame\n S: sequence length\n A: action space\n D: model latent dimension\n """"""\n # --- Encode videos and actions ---\n tokenizer_out = self.tokenizer.vq_encode(batch[""videos""], training=False)\n token_idxs = tokenizer_out[""indices""] # (B, T, N)\n B, T, N = token_idxs.shape\n pad_shape = (B, seq_len - T, N)\n pad = jnp.zeros(pad_shape, dtype=token_idxs.dtype)\n token_idxs = jnp.concatenate([token_idxs, pad], axis=1) # (B, S, N)\n action_tokens = self.lam.vq.get_codes(batch[""latent_actions""])\n\n maskgit_step = MaskGITStep(\n dynamics=self.dynamics,\n tokenizer=self.tokenizer,\n temperature=temperature,\n sample_argmax=sample_argmax,\n steps=steps,\n )\n\n def maskgit_loop_fn(carry, module, x):\n return module(carry, x)\n\n scanned_maskgit_loop = nnx.scan(\n maskgit_loop_fn,\n in_axes=(nnx.Carry, None, 0),\n out_axes=(nnx.Carry, 0),\n length=steps,\n )\n\n def generation_step_fn(carry, step_t):\n rng, current_token_idxs = carry\n rng, step_rng = jax.random.split(rng)\n\n # Mask current and future frames (i.e., t >= step_t)\n mask = jnp.arange(seq_len) >= step_t # (S,)\n mask = jnp.broadcast_to(mask[None, :, None], (B, seq_len, N)) # (B, S, N)\n mask = mask.astype(bool)\n masked_token_idxs = current_token_idxs * ~mask\n\n # --- Initialize and run MaskGIT loop ---\n init_carry_maskgit = (\n step_rng,\n masked_token_idxs,\n mask,\n action_tokens,\n )\n # FIXME (f.srambical): test whether sampling works with this\n final_carry_maskgit, _ = scanned_maskgit_loop(\n init_carry_maskgit, maskgit_step, jnp.arange(steps)\n )\n final_carry_maskgit = carry\n updated_token_idxs = final_carry_maskgit[1]\n new_carry = (rng, updated_token_idxs)\n return new_carry, None\n\n # --- Run the autoregressive generation using scan ---\n initial_carry = (batch[""rng""], token_idxs)\n timesteps_to_scan = jnp.arange(T, seq_len)\n final_carry, _ = jax.lax.scan(\n generation_step_fn, initial_carry, timesteps_to_scan\n )\n final_token_idxs = final_carry[1]\n\n # --- Decode 
all tokens at once at the end ---\n final_frames = self.tokenizer.decode(\n final_token_idxs,\n video_hw=batch[""videos""].shape[2:4],\n )\n return final_frames\n\n def vq_encode(self, batch, training) -> Dict[str, Any]:\n # --- Preprocess videos ---\n lam_output = self.lam.vq_encode(batch[""videos""], training=training)\n return lam_output[""indices""]\n\n\nclass MaskGITStep(nnx.Module):\n def __init__(\n self,\n dynamics: DynamicsMaskGIT,\n tokenizer: TokenizerVQVAE,\n temperature: float,\n sample_argmax: bool,\n steps: int,\n ):\n self.dynamics = dynamics\n self.tokenizer = tokenizer\n self.temperature = temperature\n self.sample_argmax = sample_argmax\n self.steps = steps\n\n def __call__(self, carry, x):\n rng, token_idxs, mask, action_tokens = carry\n step = x\n N = token_idxs.shape[2]\n\n # --- Construct + encode video ---\n vid_embed = self.dynamics.patch_embed(token_idxs) # (B, S, N, D)\n mask_token = self.dynamics.mask_token.value # (1, 1, 1, D,)\n mask_expanded = mask[..., None] # (B, S, N, 1)\n vid_embed = jnp.where(mask_expanded, mask_token, vid_embed)\n\n # --- Predict transition ---\n act_embed = self.dynamics.action_up(action_tokens)\n vid_embed += jnp.pad(act_embed, ((0, 0), (1, 0), (0, 0), (0, 0)))\n unmasked_ratio = jnp.cos(jnp.pi * (step + 1) / (self.steps * 2))\n step_temp = self.temperature * (1.0 - unmasked_ratio)\n final_logits = self.dynamics.dynamics(vid_embed) / step_temp\n\n # --- Sample new tokens for final frame ---\n if self.sample_argmax:\n sampled_token_idxs = jnp.argmax(final_logits, axis=-1)\n else:\n rng, _rng = jax.random.split(rng)\n sampled_token_idxs = jax.random.categorical(_rng, final_logits)\n gather_fn = jax.vmap(jax.vmap(jax.vmap(lambda x, y: x[y])))\n final_token_probs = gather_fn(jax.nn.softmax(final_logits), sampled_token_idxs)\n final_token_probs += ~mask\n # Update masked tokens only\n token_idxs = jnp.where(mask, sampled_token_idxs, token_idxs)\n\n # --- Update mask ---\n num_unmasked_tokens = jnp.round(N * (1.0 - unmasked_ratio)).astype(int)\n idx_mask = jnp.arange(final_token_probs.shape[-1]) > num_unmasked_tokens\n sorted_idxs = jnp.argsort(final_token_probs, axis=-1, descending=True)\n mask_update_fn = jax.vmap(lambda msk, ids: msk.at[ids].set(idx_mask))\n new_mask = mask_update_fn(mask, sorted_idxs)\n\n new_carry = (rng, token_idxs, new_mask, action_tokens)\n return new_carry, None\n\n\n# FIXME (f.srambical): add conversion script for old checkpoints\ndef restore_genie_components(\n optimizer: nnx.Optimizer,\n sharding: jax.sharding.NamedSharding,\n rng: jax.Array,\n args,\n):\n """"""Restore pre-trained Genie components""""""\n rngs = nnx.Rngs(rng)\n\n # dummy values since we only use tx to initialize the dummy train states\n dummy_tx = optax.adamw(\n learning_rate=optax.constant_schedule(args.max_lr),\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n )\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n step_format_fixed_length=6,\n )\n tokenizer_checkpoint_manager = ocp.CheckpointManager(\n directory=args.tokenizer_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.tokenizer_dim,\n ffn_dim=args.tokenizer_ffn_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n 
num_blocks=args.tokenizer_num_blocks,\n num_heads=args.tokenizer_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs,\n )\n dummy_tokenizer_optimizer = nnx.Optimizer(dummy_tokenizer, dummy_tx)\n dummy_tokenizer_optimizer_state = nnx.state(dummy_tokenizer_optimizer)\n abstract_sharded_tokenizer_optimizer_state = _create_abstract_sharded_pytree(\n dummy_tokenizer_optimizer_state, sharding\n )\n restored_tokenizer = tokenizer_checkpoint_manager.restore(\n step=tokenizer_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(\n abstract_sharded_tokenizer_optimizer_state\n ),\n ),\n )[""model_state""]\n nnx.update(dummy_tokenizer_optimizer.model, restored_tokenizer.model)\n optimizer.model.tokenizer = dummy_tokenizer_optimizer.model\n tokenizer_checkpoint_manager.close()\n\n if args.lam_checkpoint:\n lam_checkpoint_manager = ocp.CheckpointManager(\n directory=args.lam_checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n dummy_lam = LatentActionModel(\n in_dim=args.image_channels,\n model_dim=args.lam_dim,\n ffn_dim=args.lam_ffn_dim,\n latent_dim=args.latent_patch_dim,\n num_latents=args.num_latent_actions,\n patch_size=args.lam_patch_size,\n num_blocks=args.lam_num_blocks,\n num_heads=args.lam_num_heads,\n dropout=args.dropout,\n codebook_dropout=args.dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs,\n )\n dummy_lam_optimizer = nnx.Optimizer(dummy_lam, dummy_tx)\n dummy_lam_optimizer_state = nnx.state(dummy_lam_optimizer)\n abstract_sharded_lam_optimizer_state = _create_abstract_sharded_pytree(\n dummy_lam_optimizer_state, sharding\n )\n restored_lam_optimizer = lam_checkpoint_manager.restore(\n step=lam_checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(\n abstract_sharded_lam_optimizer_state\n ),\n ),\n )[""model_state""]\n nnx.update(dummy_lam_optimizer.model, restored_lam_optimizer.model)\n optimizer.model.lam = dummy_lam_optimizer.model\n # Remove the LAM decoder to save memory and avoid unnecessary computation.\n del optimizer.model.lam.decoder\n lam_checkpoint_manager.close()\n\n return optimizer\n\n\ndef _create_abstract_sharded_pytree(pytree_template, sharding_spec):\n """"""Replaces arrays in a pytree with ShapeDtypeStructs having the given sharding.""""""\n\n def map_fn(leaf_template):\n if hasattr(leaf_template, ""shape"") and hasattr(leaf_template, ""dtype""):\n return jax.ShapeDtypeStruct(\n leaf_template.shape, leaf_template.dtype, sharding=sharding_spec\n )\n return leaf_template\n\n return jax.tree_util.tree_map(map_fn, pytree_template)\n",python,tab
6,66037,"genie.py",0,0,"",python,selection_command
7,76889,"genie.py",2749,0,"",python,selection_command
8,79971,"train_dynamics.py",0,0,"",python,tab
9,83215,"train_dynamics.py",0,0,"",python,selection_command
10,88186,"train_dynamics.py",371,0,"",python,selection_command
11,91116,"train_dynamics.py",377,0,"",python,selection_command
12,91330,"train_dynamics.py",384,0,"",python,selection_command
13,91580,"train_dynamics.py",5260,0,"",python,selection_command
14,92840,"train_dynamics.py",5266,0,"",python,selection_command
15,93026,"train_dynamics.py",5268,0,"",python,selection_command
16,109627,"genie.py",0,0,"",python,tab
17,119781,"train_dynamics.py",0,0,"",python,tab
18,119787,"genie.py",0,0,"",python,tab
19,157100,"genie.py",14249,0,"",python,selection_command
20,162875,"genie.py",14265,0,"",python,selection_command
21,162876,"genie.py",14266,0,"",python,selection_command
22,162877,"genie.py",14270,0,"",python,selection_command
23,162878,"genie.py",14271,0,"",python,selection_command
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-895267d6-5fbc-45e8-bc56-0d7c756881181750708632303-2025_06_23-12.57.13.921/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-8a8d866a-4152-4c47-8a5f-a8a6a4c71f3e1753768819993-2025_07_29-08.00.29.05/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-a775c5ce-801a-4b55-897a-6c0b6f3448081754127102402-2025_08_02-11.31.50.147/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-ac1e2da2-a2d2-4327-aaa3-5900bc2b3a561753469517123-2025_07_25-20.52.05.763/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-b08d92a3-9c0a-4526-b12f-c973e9c3c43f1752071802867-2025_07_09-16.36.43.962/source.csv
ADDED
@@ -0,0 +1,267 @@
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
2,53,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"4:36:43 PM [info] Activating crowd-code\n4:36:43 PM [info] Recording started\n4:36:43 PM [info] Initializing git provider using file system watchers...\n4:36:43 PM [info] No workspace folder found\n",Log,tab
3,94480,"/Users/franzsrambical/.ssh/config",0,0,"Host login.haicore.berlin\n HostName login.haicore.berlin\n User franz.srambical\n\nHost hpc-build01.scidom.de\n HostName hpc-build01.scidom.de\n User franz.srambical\n\nHost horeka.scc.kit.edu\n HostName horeka.scc.kit.edu\n User tum_dbd0378\n\nHost juwels-cluster.fz-juelich.de\n HostName juwels-cluster.fz-juelich.de\n IdentityFile ~/.ssh/id_ed25519\n User srambical2\n\nHost hpc-submit02.scidom.de\n HostName hpc-submit02.scidom.de\n User franz.srambical\n\nHost hpc-submit01.scidom.de\n HostName hpc-submit01.scidom.de\n User franz.srambical",plaintext,tab
4,95685,"/Users/franzsrambical/.ssh/config",26,0,"",plaintext,selection_command
5,95929,"/Users/franzsrambical/.ssh/config",58,0,"",plaintext,selection_command
6,95947,"/Users/franzsrambical/.ssh/config",81,0,"",plaintext,selection_command
7,95981,"/Users/franzsrambical/.ssh/config",82,0,"",plaintext,selection_command
8,96015,"/Users/franzsrambical/.ssh/config",109,0,"",plaintext,selection_command
9,96048,"/Users/franzsrambical/.ssh/config",142,0,"",plaintext,selection_command
10,96082,"/Users/franzsrambical/.ssh/config",165,0,"",plaintext,selection_command
11,96117,"/Users/franzsrambical/.ssh/config",166,0,"",plaintext,selection_command
12,96150,"/Users/franzsrambical/.ssh/config",190,0,"",plaintext,selection_command
13,96184,"/Users/franzsrambical/.ssh/config",220,0,"",plaintext,selection_command
14,96221,"/Users/franzsrambical/.ssh/config",190,0,"",plaintext,selection_command
15,96485,"/Users/franzsrambical/.ssh/config",166,0,"",plaintext,selection_command
16,96514,"/Users/franzsrambical/.ssh/config",165,0,"",plaintext,selection_command
17,96547,"/Users/franzsrambical/.ssh/config",142,0,"",plaintext,selection_command
18,96575,"/Users/franzsrambical/.ssh/config",109,0,"",plaintext,selection_command
19,96607,"/Users/franzsrambical/.ssh/config",82,0,"",plaintext,selection_command
20,96638,"/Users/franzsrambical/.ssh/config",81,0,"",plaintext,selection_command
21,96671,"/Users/franzsrambical/.ssh/config",58,0,"",plaintext,selection_command
22,96705,"/Users/franzsrambical/.ssh/config",26,0,"",plaintext,selection_command
23,96739,"/Users/franzsrambical/.ssh/config",0,0,"",plaintext,selection_command
24,100915,"/Users/franzsrambical/.ssh/config",0,25,"Host login.haicore.berlin",plaintext,selection_command
25,101063,"/Users/franzsrambical/.ssh/config",0,57,"Host login.haicore.berlin\n HostName login.haicore.berlin",plaintext,selection_command
26,101231,"/Users/franzsrambical/.ssh/config",0,80,"Host login.haicore.berlin\n HostName login.haicore.berlin\n User franz.srambical",plaintext,selection_command
27,101369,"/Users/franzsrambical/.ssh/config",0,81,"Host login.haicore.berlin\n HostName login.haicore.berlin\n User franz.srambical\n",plaintext,selection_command
|
28 |
+
28,101616,"/Users/franzsrambical/.ssh/config",0,82,"",plaintext,content
|
29 |
+
29,106243,"/Users/franzsrambical/.ssh/config",0,0,"",plaintext,tab
|
30 |
+
30,106263,"/Users/franzsrambical/.ssh/config",454,0,"\n",plaintext,content
|
31 |
+
31,108988,"/Users/franzsrambical/.ssh/config",454,1,"",plaintext,content
|
32 |
+
32,112261,"/Users/franzsrambical/.ssh/config",0,0,"",plaintext,tab
|
33 |
+
33,112284,"/Users/franzsrambical/.ssh/config",454,0,"\n",plaintext,content
|
34 |
+
34,117860,"/Users/franzsrambical/.ssh/config",0,0,"",plaintext,tab
|
35 |
+
35,119181,"/Users/franzsrambical/.ssh/config",27,0,"",plaintext,selection_command
|
36 |
+
36,119428,"/Users/franzsrambical/.ssh/config",60,0,"",plaintext,selection_command
|
37 |
+
37,119459,"/Users/franzsrambical/.ssh/config",83,0,"",plaintext,selection_command
|
38 |
+
38,119491,"/Users/franzsrambical/.ssh/config",84,0,"",plaintext,selection_command
|
39 |
+
39,119521,"/Users/franzsrambical/.ssh/config",108,0,"",plaintext,selection_command
|
40 |
+
40,119553,"/Users/franzsrambical/.ssh/config",138,0,"",plaintext,selection_command
|
41 |
+
41,119587,"/Users/franzsrambical/.ssh/config",157,0,"",plaintext,selection_command
|
42 |
+
42,119621,"/Users/franzsrambical/.ssh/config",158,0,"",plaintext,selection_command
|
43 |
+
43,119654,"/Users/franzsrambical/.ssh/config",192,0,"",plaintext,selection_command
|
44 |
+
44,119688,"/Users/franzsrambical/.ssh/config",232,0,"",plaintext,selection_command
|
45 |
+
45,119721,"/Users/franzsrambical/.ssh/config",265,0,"",plaintext,selection_command
|
46 |
+
46,119755,"/Users/franzsrambical/.ssh/config",283,0,"",plaintext,selection_command
|
47 |
+
47,119788,"/Users/franzsrambical/.ssh/config",284,0,"",plaintext,selection_command
|
48 |
+
48,119821,"/Users/franzsrambical/.ssh/config",312,0,"",plaintext,selection_command
|
49 |
+
49,119855,"/Users/franzsrambical/.ssh/config",346,0,"",plaintext,selection_command
|
50 |
+
50,119889,"/Users/franzsrambical/.ssh/config",369,0,"",plaintext,selection_command
|
51 |
+
51,119922,"/Users/franzsrambical/.ssh/config",370,0,"",plaintext,selection_command
|
52 |
+
52,119958,"/Users/franzsrambical/.ssh/config",398,0,"",plaintext,selection_command
|
53 |
+
53,119991,"/Users/franzsrambical/.ssh/config",432,0,"",plaintext,selection_command
|
54 |
+
54,120022,"/Users/franzsrambical/.ssh/config",455,0,"",plaintext,selection_command
|
55 |
+
55,128496,"/Users/franzsrambical/.ssh/config",455,0,"\n",plaintext,content
|
56 |
+
56,128583,"/Users/franzsrambical/.ssh/config",456,0,"ssh [email protected]",plaintext,content
|
57 |
+
57,128584,"/Users/franzsrambical/.ssh/config",496,0,"",plaintext,selection_keyboard
|
58 |
+
58,130098,"/Users/franzsrambical/.ssh/config",495,0,"",plaintext,selection_command
|
59 |
+
59,131052,"/Users/franzsrambical/.ssh/config",456,40,"ssh [email protected]",plaintext,selection_command
|
60 |
+
60,132210,"/Users/franzsrambical/.ssh/config",495,0,"",plaintext,selection_command
|
61 |
+
61,132523,"/Users/franzsrambical/.ssh/config",455,0,"\n",plaintext,content
|
62 |
+
62,132854,"/Users/franzsrambical/.ssh/config",456,0,"H",plaintext,content
|
63 |
+
63,132856,"/Users/franzsrambical/.ssh/config",457,0,"",plaintext,selection_keyboard
|
64 |
+
64,133188,"/Users/franzsrambical/.ssh/config",457,0,"o",plaintext,content
|
65 |
+
65,133191,"/Users/franzsrambical/.ssh/config",458,0,"",plaintext,selection_keyboard
|
66 |
+
66,133287,"/Users/franzsrambical/.ssh/config",458,0,"s",plaintext,content
|
67 |
+
67,133290,"/Users/franzsrambical/.ssh/config",459,0,"",plaintext,selection_keyboard
|
68 |
+
68,133387,"/Users/franzsrambical/.ssh/config",459,0,"t",plaintext,content
|
69 |
+
69,133389,"/Users/franzsrambical/.ssh/config",460,0,"",plaintext,selection_keyboard
|
70 |
+
70,133432,"/Users/franzsrambical/.ssh/config",460,0," ",plaintext,content
|
71 |
+
71,133434,"/Users/franzsrambical/.ssh/config",461,0,"",plaintext,selection_keyboard
|
72 |
+
72,146263,"/Users/franzsrambical/.ssh/config",460,0,"",plaintext,selection_command
|
73 |
+
73,146405,"/Users/franzsrambical/.ssh/config",456,6,"",plaintext,content
|
74 |
+
74,146409,"/Users/franzsrambical/.ssh/config",495,0,"",plaintext,selection_command
|
75 |
+
75,146556,"/Users/franzsrambical/.ssh/config",455,41,"",plaintext,content
|
76 |
+
76,148933,"/Users/franzsrambical/.ssh/config",455,0,"\n",plaintext,content
|
77 |
+
77,149329,"/Users/franzsrambical/.ssh/config",456,0,"H",plaintext,content
|
78 |
+
78,149331,"/Users/franzsrambical/.ssh/config",457,0,"",plaintext,selection_keyboard
|
79 |
+
79,149492,"/Users/franzsrambical/.ssh/config",457,0,"o",plaintext,content
|
80 |
+
80,149495,"/Users/franzsrambical/.ssh/config",458,0,"",plaintext,selection_keyboard
|
81 |
+
81,149560,"/Users/franzsrambical/.ssh/config",458,0,"s",plaintext,content
|
82 |
+
82,149562,"/Users/franzsrambical/.ssh/config",459,0,"",plaintext,selection_keyboard
|
83 |
+
83,149610,"/Users/franzsrambical/.ssh/config",459,0,"t",plaintext,content
|
84 |
+
84,149612,"/Users/franzsrambical/.ssh/config",460,0,"",plaintext,selection_keyboard
|
85 |
+
85,149655,"/Users/franzsrambical/.ssh/config",460,0," ",plaintext,content
|
86 |
+
86,149657,"/Users/franzsrambical/.ssh/config",461,0,"",plaintext,selection_keyboard
|
87 |
+
87,153162,"/Users/franzsrambical/.ssh/config",461,0,"l",plaintext,content
|
88 |
+
88,153164,"/Users/franzsrambical/.ssh/config",462,0,"",plaintext,selection_keyboard
|
89 |
+
89,153299,"/Users/franzsrambical/.ssh/config",462,0,"o",plaintext,content
|
90 |
+
90,153301,"/Users/franzsrambical/.ssh/config",463,0,"",plaintext,selection_keyboard
|
91 |
+
91,153371,"/Users/franzsrambical/.ssh/config",463,0,"g",plaintext,content
|
92 |
+
92,153371,"/Users/franzsrambical/.ssh/config",464,0,"",plaintext,selection_keyboard
|
93 |
+
93,153460,"/Users/franzsrambical/.ssh/config",464,0,"i",plaintext,content
|
94 |
+
94,153461,"/Users/franzsrambical/.ssh/config",465,0,"",plaintext,selection_keyboard
|
95 |
+
95,153530,"/Users/franzsrambical/.ssh/config",465,0,"n",plaintext,content
|
96 |
+
96,153531,"/Users/franzsrambical/.ssh/config",466,0,"",plaintext,selection_keyboard
|
97 |
+
97,153785,"/Users/franzsrambical/.ssh/config",466,0,"@",plaintext,content
|
98 |
+
98,153786,"/Users/franzsrambical/.ssh/config",467,0,"",plaintext,selection_keyboard
|
99 |
+
99,154547,"/Users/franzsrambical/.ssh/config",466,1,"",plaintext,content
|
100 |
+
100,155018,"/Users/franzsrambical/.ssh/config",466,0,".",plaintext,content
|
101 |
+
101,155020,"/Users/franzsrambical/.ssh/config",467,0,"",plaintext,selection_keyboard
|
102 |
+
102,155700,"/Users/franzsrambical/.ssh/config",467,0,"h",plaintext,content
|
103 |
+
103,155701,"/Users/franzsrambical/.ssh/config",468,0,"",plaintext,selection_keyboard
|
104 |
+
104,155757,"/Users/franzsrambical/.ssh/config",468,0,"a",plaintext,content
|
105 |
+
105,155760,"/Users/franzsrambical/.ssh/config",469,0,"",plaintext,selection_keyboard
|
106 |
+
106,155841,"/Users/franzsrambical/.ssh/config",469,0,"i",plaintext,content
|
107 |
+
107,155843,"/Users/franzsrambical/.ssh/config",470,0,"",plaintext,selection_keyboard
|
108 |
+
108,155901,"/Users/franzsrambical/.ssh/config",470,0,"c",plaintext,content
|
109 |
+
109,155902,"/Users/franzsrambical/.ssh/config",471,0,"",plaintext,selection_keyboard
|
110 |
+
110,156007,"/Users/franzsrambical/.ssh/config",471,0,"o",plaintext,content
|
111 |
+
111,156008,"/Users/franzsrambical/.ssh/config",472,0,"",plaintext,selection_keyboard
|
112 |
+
112,156111,"/Users/franzsrambical/.ssh/config",472,0,"r",plaintext,content
|
113 |
+
113,156112,"/Users/franzsrambical/.ssh/config",473,0,"",plaintext,selection_keyboard
|
114 |
+
114,156168,"/Users/franzsrambical/.ssh/config",473,0,"e",plaintext,content
|
115 |
+
115,156169,"/Users/franzsrambical/.ssh/config",474,0,"",plaintext,selection_keyboard
|
116 |
+
116,156248,"/Users/franzsrambical/.ssh/config",474,0,".",plaintext,content
|
117 |
+
117,156249,"/Users/franzsrambical/.ssh/config",475,0,"",plaintext,selection_keyboard
|
118 |
+
118,156396,"/Users/franzsrambical/.ssh/config",475,0,"b",plaintext,content
|
119 |
+
119,156397,"/Users/franzsrambical/.ssh/config",476,0,"",plaintext,selection_keyboard
|
120 |
+
120,156584,"/Users/franzsrambical/.ssh/config",476,0,"e",plaintext,content
|
121 |
+
121,156586,"/Users/franzsrambical/.ssh/config",477,0,"",plaintext,selection_keyboard
|
122 |
+
122,156644,"/Users/franzsrambical/.ssh/config",477,0,"r",plaintext,content
|
123 |
+
123,156645,"/Users/franzsrambical/.ssh/config",478,0,"",plaintext,selection_keyboard
|
124 |
+
124,156709,"/Users/franzsrambical/.ssh/config",478,0,"l",plaintext,content
|
125 |
+
125,156710,"/Users/franzsrambical/.ssh/config",479,0,"",plaintext,selection_keyboard
|
126 |
+
126,156824,"/Users/franzsrambical/.ssh/config",479,0,"i",plaintext,content
|
127 |
+
127,156825,"/Users/franzsrambical/.ssh/config",480,0,"",plaintext,selection_keyboard
|
128 |
+
128,156896,"/Users/franzsrambical/.ssh/config",480,0,"n",plaintext,content
|
129 |
+
129,156896,"/Users/franzsrambical/.ssh/config",481,0,"",plaintext,selection_keyboard
|
130 |
+
130,157332,"/Users/franzsrambical/.ssh/config",481,0,"\n",plaintext,content
|
131 |
+
131,157969,"/Users/franzsrambical/.ssh/config",482,0," ",plaintext,content
|
132 |
+
132,158635,"/Users/franzsrambical/.ssh/config",484,0,"H",plaintext,content
|
133 |
+
133,158636,"/Users/franzsrambical/.ssh/config",485,0,"",plaintext,selection_keyboard
|
134 |
+
134,158766,"/Users/franzsrambical/.ssh/config",485,0,"o",plaintext,content
|
135 |
+
135,158768,"/Users/franzsrambical/.ssh/config",486,0,"",plaintext,selection_keyboard
|
136 |
+
136,158794,"/Users/franzsrambical/.ssh/config",486,0,"s",plaintext,content
|
137 |
+
137,158797,"/Users/franzsrambical/.ssh/config",487,0,"",plaintext,selection_keyboard
|
138 |
+
138,158853,"/Users/franzsrambical/.ssh/config",487,0,"t",plaintext,content
|
139 |
+
139,158855,"/Users/franzsrambical/.ssh/config",488,0,"",plaintext,selection_keyboard
|
140 |
+
140,159080,"/Users/franzsrambical/.ssh/config",488,0,"N",plaintext,content
|
141 |
+
141,159083,"/Users/franzsrambical/.ssh/config",489,0,"",plaintext,selection_keyboard
|
142 |
+
142,159245,"/Users/franzsrambical/.ssh/config",489,0,"a",plaintext,content
|
143 |
+
143,159247,"/Users/franzsrambical/.ssh/config",490,0,"",plaintext,selection_keyboard
|
144 |
+
144,159281,"/Users/franzsrambical/.ssh/config",490,0,"m",plaintext,content
|
145 |
+
145,159283,"/Users/franzsrambical/.ssh/config",491,0,"",plaintext,selection_keyboard
|
146 |
+
146,159432,"/Users/franzsrambical/.ssh/config",491,0,"e",plaintext,content
|
147 |
+
147,159434,"/Users/franzsrambical/.ssh/config",492,0,"",plaintext,selection_keyboard
|
148 |
+
148,164462,"/Users/franzsrambical/.ssh/config",492,0," ",plaintext,content
|
149 |
+
149,164464,"/Users/franzsrambical/.ssh/config",493,0,"",plaintext,selection_keyboard
|
150 |
+
150,165235,"/Users/franzsrambical/.ssh/config",493,0,"l",plaintext,content
|
151 |
+
151,165237,"/Users/franzsrambical/.ssh/config",494,0,"",plaintext,selection_keyboard
|
152 |
+
152,165387,"/Users/franzsrambical/.ssh/config",494,0,"o",plaintext,content
|
153 |
+
153,165391,"/Users/franzsrambical/.ssh/config",495,0,"",plaintext,selection_keyboard
|
154 |
+
154,165462,"/Users/franzsrambical/.ssh/config",495,0,"g",plaintext,content
|
155 |
+
155,165465,"/Users/franzsrambical/.ssh/config",496,0,"",plaintext,selection_keyboard
|
156 |
+
156,165536,"/Users/franzsrambical/.ssh/config",496,0,"i",plaintext,content
|
157 |
+
157,165538,"/Users/franzsrambical/.ssh/config",497,0,"",plaintext,selection_keyboard
|
158 |
+
158,165626,"/Users/franzsrambical/.ssh/config",497,0,"n",plaintext,content
|
159 |
+
159,165628,"/Users/franzsrambical/.ssh/config",498,0,"",plaintext,selection_keyboard
|
160 |
+
160,165774,"/Users/franzsrambical/.ssh/config",498,0,".",plaintext,content
|
161 |
+
161,165777,"/Users/franzsrambical/.ssh/config",499,0,"",plaintext,selection_keyboard
|
162 |
+
162,166016,"/Users/franzsrambical/.ssh/config",499,0,"h",plaintext,content
|
163 |
+
163,166018,"/Users/franzsrambical/.ssh/config",500,0,"",plaintext,selection_keyboard
|
164 |
+
164,166062,"/Users/franzsrambical/.ssh/config",500,0,"a",plaintext,content
|
165 |
+
165,166064,"/Users/franzsrambical/.ssh/config",501,0,"",plaintext,selection_keyboard
|
166 |
+
166,166165,"/Users/franzsrambical/.ssh/config",501,0,"i",plaintext,content
|
167 |
+
167,166167,"/Users/franzsrambical/.ssh/config",502,0,"",plaintext,selection_keyboard
|
168 |
+
168,166294,"/Users/franzsrambical/.ssh/config",502,0,"c",plaintext,content
|
169 |
+
169,166296,"/Users/franzsrambical/.ssh/config",503,0,"",plaintext,selection_keyboard
|
170 |
+
170,166400,"/Users/franzsrambical/.ssh/config",503,0,"o",plaintext,content
|
171 |
+
171,166402,"/Users/franzsrambical/.ssh/config",504,0,"",plaintext,selection_keyboard
|
172 |
+
172,166494,"/Users/franzsrambical/.ssh/config",504,0,"r",plaintext,content
|
173 |
+
173,166496,"/Users/franzsrambical/.ssh/config",505,0,"",plaintext,selection_keyboard
|
174 |
+
174,166569,"/Users/franzsrambical/.ssh/config",505,0,"e",plaintext,content
|
175 |
+
175,166571,"/Users/franzsrambical/.ssh/config",506,0,"",plaintext,selection_keyboard
|
176 |
+
176,166623,"/Users/franzsrambical/.ssh/config",506,0,".",plaintext,content
|
177 |
+
177,166625,"/Users/franzsrambical/.ssh/config",507,0,"",plaintext,selection_keyboard
|
178 |
+
178,166811,"/Users/franzsrambical/.ssh/config",507,0,"b",plaintext,content
|
179 |
+
179,166812,"/Users/franzsrambical/.ssh/config",508,0,"",plaintext,selection_keyboard
|
180 |
+
180,166871,"/Users/franzsrambical/.ssh/config",508,0,"e",plaintext,content
|
181 |
+
181,166873,"/Users/franzsrambical/.ssh/config",509,0,"",plaintext,selection_keyboard
|
182 |
+
182,166947,"/Users/franzsrambical/.ssh/config",509,0,"r",plaintext,content
|
183 |
+
183,166948,"/Users/franzsrambical/.ssh/config",510,0,"",plaintext,selection_keyboard
|
184 |
+
184,166988,"/Users/franzsrambical/.ssh/config",510,0,"l",plaintext,content
|
185 |
+
185,166989,"/Users/franzsrambical/.ssh/config",511,0,"",plaintext,selection_keyboard
|
186 |
+
186,167052,"/Users/franzsrambical/.ssh/config",511,0,"i",plaintext,content
|
187 |
+
187,167054,"/Users/franzsrambical/.ssh/config",512,0,"",plaintext,selection_keyboard
|
188 |
+
188,167115,"/Users/franzsrambical/.ssh/config",512,0,"n",plaintext,content
|
189 |
+
189,167117,"/Users/franzsrambical/.ssh/config",513,0,"",plaintext,selection_keyboard
|
190 |
+
190,167392,"/Users/franzsrambical/.ssh/config",507,6,"berlin",plaintext,content
|
191 |
+
191,167715,"/Users/franzsrambical/.ssh/config",513,0,"\n ",plaintext,content
|
192 |
+
192,169941,"/Users/franzsrambical/.ssh/config",516,0,"U",plaintext,content
|
193 |
+
193,169943,"/Users/franzsrambical/.ssh/config",517,0,"",plaintext,selection_keyboard
|
194 |
+
194,170103,"/Users/franzsrambical/.ssh/config",517,0,"s",plaintext,content
|
195 |
+
195,170105,"/Users/franzsrambical/.ssh/config",518,0,"",plaintext,selection_keyboard
|
196 |
+
196,170170,"/Users/franzsrambical/.ssh/config",518,0,"e",plaintext,content
|
197 |
+
197,170172,"/Users/franzsrambical/.ssh/config",519,0,"",plaintext,selection_keyboard
|
198 |
+
198,170250,"/Users/franzsrambical/.ssh/config",519,0,"r",plaintext,content
|
199 |
+
199,170251,"/Users/franzsrambical/.ssh/config",520,0,"",plaintext,selection_keyboard
|
200 |
+
200,171002,"/Users/franzsrambical/.ssh/config",520,0," ",plaintext,content
|
201 |
+
201,171005,"/Users/franzsrambical/.ssh/config",521,0,"",plaintext,selection_keyboard
|
202 |
+
202,171222,"/Users/franzsrambical/.ssh/config",521,0,"f",plaintext,content
|
203 |
+
203,171223,"/Users/franzsrambical/.ssh/config",522,0,"",plaintext,selection_keyboard
|
204 |
+
204,171374,"/Users/franzsrambical/.ssh/config",522,0,"r",plaintext,content
|
205 |
+
205,171379,"/Users/franzsrambical/.ssh/config",523,0,"",plaintext,selection_keyboard
|
206 |
+
206,171397,"/Users/franzsrambical/.ssh/config",523,0,"a",plaintext,content
|
207 |
+
207,171398,"/Users/franzsrambical/.ssh/config",524,0,"",plaintext,selection_keyboard
|
208 |
+
208,171500,"/Users/franzsrambical/.ssh/config",524,0,"n",plaintext,content
|
209 |
+
209,171501,"/Users/franzsrambical/.ssh/config",525,0,"",plaintext,selection_keyboard
|
210 |
+
210,171581,"/Users/franzsrambical/.ssh/config",525,0,"z",plaintext,content
|
211 |
+
211,171583,"/Users/franzsrambical/.ssh/config",526,0,"",plaintext,selection_keyboard
|
212 |
+
212,171700,"/Users/franzsrambical/.ssh/config",526,0,".",plaintext,content
|
213 |
+
213,171701,"/Users/franzsrambical/.ssh/config",527,0,"",plaintext,selection_keyboard
|
214 |
+
214,171772,"/Users/franzsrambical/.ssh/config",527,0,"s",plaintext,content
|
215 |
+
215,171773,"/Users/franzsrambical/.ssh/config",528,0,"",plaintext,selection_keyboard
|
216 |
+
216,171831,"/Users/franzsrambical/.ssh/config",528,0,"r",plaintext,content
|
217 |
+
217,171833,"/Users/franzsrambical/.ssh/config",529,0,"",plaintext,selection_keyboard
|
218 |
+
218,171914,"/Users/franzsrambical/.ssh/config",529,0,"a",plaintext,content
|
219 |
+
219,171916,"/Users/franzsrambical/.ssh/config",530,0,"",plaintext,selection_keyboard
|
220 |
+
220,171943,"/Users/franzsrambical/.ssh/config",530,0,"m",plaintext,content
|
221 |
+
221,171945,"/Users/franzsrambical/.ssh/config",531,0,"",plaintext,selection_keyboard
|
222 |
+
222,172140,"/Users/franzsrambical/.ssh/config",531,0,"b",plaintext,content
|
223 |
+
223,172143,"/Users/franzsrambical/.ssh/config",532,0,"",plaintext,selection_keyboard
|
224 |
+
224,172186,"/Users/franzsrambical/.ssh/config",532,0,"i",plaintext,content
|
225 |
+
225,172188,"/Users/franzsrambical/.ssh/config",533,0,"",plaintext,selection_keyboard
|
226 |
+
226,172245,"/Users/franzsrambical/.ssh/config",533,0,"c",plaintext,content
|
227 |
+
227,172247,"/Users/franzsrambical/.ssh/config",534,0,"",plaintext,selection_keyboard
|
228 |
+
228,172289,"/Users/franzsrambical/.ssh/config",534,0,"a",plaintext,content
|
229 |
+
229,172291,"/Users/franzsrambical/.ssh/config",535,0,"",plaintext,selection_keyboard
|
230 |
+
230,172390,"/Users/franzsrambical/.ssh/config",535,0,"l",plaintext,content
|
231 |
+
231,172391,"/Users/franzsrambical/.ssh/config",536,0,"",plaintext,selection_keyboard
|
232 |
+
232,172556,"/Users/franzsrambical/.ssh/config",535,0,"",plaintext,selection_command
|
233 |
+
233,172719,"/Users/franzsrambical/.ssh/config",503,0,"",plaintext,selection_command
|
234 |
+
234,172910,"/Users/franzsrambical/.ssh/config",513,0,"\n ",plaintext,content
|
235 |
+
235,173988,"/Users/franzsrambical/.ssh/config",516,0,"I",plaintext,content
|
236 |
+
236,173989,"/Users/franzsrambical/.ssh/config",517,0,"",plaintext,selection_keyboard
|
237 |
+
237,174082,"/Users/franzsrambical/.ssh/config",517,0,"d",plaintext,content
|
238 |
+
238,174084,"/Users/franzsrambical/.ssh/config",518,0,"",plaintext,selection_keyboard
|
239 |
+
239,174148,"/Users/franzsrambical/.ssh/config",518,0,"e",plaintext,content
|
240 |
+
240,174150,"/Users/franzsrambical/.ssh/config",519,0,"",plaintext,selection_keyboard
|
241 |
+
241,174271,"/Users/franzsrambical/.ssh/config",519,0,"n",plaintext,content
|
242 |
+
242,174273,"/Users/franzsrambical/.ssh/config",520,0,"",plaintext,selection_keyboard
|
243 |
+
243,174746,"/Users/franzsrambical/.ssh/config",516,4,"IdentityFile",plaintext,content
|
244 |
+
244,175993,"/Users/franzsrambical/.ssh/config",528,0," ",plaintext,content
|
245 |
+
245,175995,"/Users/franzsrambical/.ssh/config",529,0,"",plaintext,selection_keyboard
|
246 |
+
246,176464,"/Users/franzsrambical/.ssh/config",529,0,"~",plaintext,content
|
247 |
+
247,176467,"/Users/franzsrambical/.ssh/config",530,0,"",plaintext,selection_keyboard
|
248 |
+
248,177648,"/Users/franzsrambical/.ssh/config",530,0,"/",plaintext,content
|
249 |
+
249,177649,"/Users/franzsrambical/.ssh/config",531,0,"",plaintext,selection_keyboard
|
250 |
+
250,177830,"/Users/franzsrambical/.ssh/config",531,0,".",plaintext,content
|
251 |
+
251,177832,"/Users/franzsrambical/.ssh/config",532,0,"",plaintext,selection_keyboard
|
252 |
+
252,177940,"/Users/franzsrambical/.ssh/config",532,0,"s",plaintext,content
|
253 |
+
253,177942,"/Users/franzsrambical/.ssh/config",533,0,"",plaintext,selection_keyboard
|
254 |
+
254,178078,"/Users/franzsrambical/.ssh/config",533,0,"s",plaintext,content
|
255 |
+
255,178081,"/Users/franzsrambical/.ssh/config",534,0,"",plaintext,selection_keyboard
|
256 |
+
256,178173,"/Users/franzsrambical/.ssh/config",534,0,"h",plaintext,content
|
257 |
+
257,178175,"/Users/franzsrambical/.ssh/config",535,0,"",plaintext,selection_keyboard
|
258 |
+
258,179348,"/Users/franzsrambical/.ssh/config",535,0,"/",plaintext,content
|
259 |
+
259,179351,"/Users/franzsrambical/.ssh/config",536,0,"",plaintext,selection_keyboard
|
260 |
+
260,179561,"/Users/franzsrambical/.ssh/config",536,0,"i",plaintext,content
|
261 |
+
261,179563,"/Users/franzsrambical/.ssh/config",537,0,"",plaintext,selection_keyboard
|
262 |
+
262,179646,"/Users/franzsrambical/.ssh/config",537,0,"d",plaintext,content
|
263 |
+
263,179648,"/Users/franzsrambical/.ssh/config",538,0,"",plaintext,selection_keyboard
|
264 |
+
264,179919,"/Users/franzsrambical/.ssh/config",538,0,"_",plaintext,content
|
265 |
+
265,179922,"/Users/franzsrambical/.ssh/config",539,0,"",plaintext,selection_keyboard
|
266 |
+
266,180693,"/Users/franzsrambical/.ssh/config",536,3,"id_ed25519",plaintext,content
|
267 |
+
267,181818,"/Users/franzsrambical/.ssh/config",545,0,"",plaintext,selection_command
|
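Every source.csv in this upload uses the header shown at the top of the table above: Sequence (event index), Time (apparently milliseconds since recording start), File, RangeOffset and RangeLength (the character span affected), Text (the text inserted or selected), Language, and Type (observed values include tab, content, selection_command, selection_keyboard, and selection_mouse). A minimal loading sketch, assuming only Python's standard csv module; the field handling below is inferred from the rows above, not taken from the crowd-code extension itself:

import csv

def read_events(path):
    # One dict per editor event; Text appears to store newlines as the
    # two-character escape sequence \n, so we decode it here (assumption).
    with open(path, newline="", encoding="utf-8") as f:
        for row in csv.DictReader(f):
            yield {
                "seq": int(row["Sequence"]),
                "time_ms": int(row["Time"]),
                "file": row["File"],
                "offset": int(row["RangeOffset"]),
                "length": int(row["RangeLength"]),
                "text": row["Text"].replace("\\n", "\n"),
                "lang": row["Language"],
                "type": row["Type"],
            }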
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-b2ff9c32-0980-46be-b0f1-51dff76665011752660866284-2025_07_16-12.14.34.814/source.csv
ADDED
@@ -0,0 +1,10 @@
1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
2 |
+
1,3,"train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\n\nimport einops\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n min_lr: float = 0.0\n max_lr: float = 3e-5\n warmup_steps: int = 5000\n # Tokenizer\n tokenizer_dim: int = 512\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 8\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 8\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_dim: int = 512\n dyna_num_blocks: int = 12\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n param_dtype: jnp.dtype = jnp.float32\n dtype: jnp.dtype = jnp.bfloat16\n use_flash_attention: bool = True\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(params, state, inputs):\n """"""Compute masked dynamics loss""""""\n inputs[""videos""] = inputs[""videos""].astype(args.dtype) / 255.0\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\[email protected]\ndef train_step(state, inputs):\n """"""Update state and compute metrics""""""\n grad_fn = jax.value_and_grad(dynamics_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n 
jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n dummy_inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=args.dtype,\n ),\n action=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len), dtype=args.dtype\n ),\n mask_rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = genie.init(_rng, dummy_inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb.init(\n entity=args.entity,\n project=args.project,\n name=args.name,\n tags=args.tags,\n group=""debug"",\n config=args,\n )\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4, mu_dtype=args.dtype)\n train_state = TrainState.create(apply_fn=genie.apply, params=init_params, tx=tx)\n\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add('model_state', ocp.args.StandardSave, ocp.handlers.StandardCheckpointHandler)\n handler_registry.add('model_state', ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler)\n handler_registry.add('dataloader_state', grain.checkpoint.CheckpointSave, grain.checkpoint.CheckpointHandler) # type: ignore\n handler_registry.add('dataloader_state', grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler) # type: ignore\n \n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n 
keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n \n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n \n # --- Restore checkpoint ---\n if args.restore_ckpt:\n # Restore full dynamics model\n abstract_train_state = jax.tree_util.tree_map(ocp.utils.to_shape_dtype_struct, train_state)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator),\n )\n )\n train_state = restored[""model_state""]\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n train_state = restore_genie_components(\n train_state, replicated_sharding, grain_iterator, dummy_inputs, rng, args\n )\n\n # --- TRAIN LOOP ---\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in grain_iterator) # type: ignore\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout, _rng_mask = jax.random.split(rng, 4)\n\n inputs = dict(\n videos=videos,\n rng=_rng,\n dropout_rng=_rng_dropout,\n mask_rng=_rng_mask,\n )\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.StandardSave(train_state),\n dataloader_state=grain.checkpoint.CheckpointSave(grain_iterator),\n )\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()",python,tab
|
3 |
+
2,223,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"12:14:34 PM [info] Activating crowd-code\n12:14:34 PM [info] Recording started\n12:14:34 PM [info] Initializing git provider using file system watchers...\n",Log,tab
|
4 |
+
3,271,"extension-output-pdoom-org.crowd-code-#1-crowd-code",153,0,"12:14:34 PM [info] Git repository found\n12:14:34 PM [info] Git provider initialized successfully\n12:14:34 PM [info] Initial git state: [object Object]\n",Log,content
|
5 |
+
4,360,"benchmark_batch_sizes_tokenizer.py",0,0,"from dataclasses import dataclass, field\nimport os\nimport time\nimport json\nfrom typing import List, Dict, Any, Optional\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport grain\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\nfrom utils.parameter_utils import count_parameters_by_component\n\n\ndef create_mock_dataloader(batch_size: int, seq_len: int, image_height: int, image_width: int, image_channels: int, num_devices: int):\n """"""\n Create a mock dataloader that generates instant dummy data.\n This eliminates IO bottlenecks to test if you're memory-bound.\n """"""\n per_device_batch_size = batch_size // num_devices\n \n def mock_dataloader():\n """"""Generator that yields dummy video data instantly.""""""\n while True:\n # Generate random dummy data with the correct shape\n # Using jax.random for consistency with JAX\n dummy_videos = jax.random.randint(\n jax.random.PRNGKey(0), # Fixed key for deterministic but random data\n (per_device_batch_size, seq_len, image_height, image_width, image_channels),\n minval=0,\n maxval=255,\n dtype=jnp.uint8\n )\n yield dummy_videos\n \n return mock_dataloader()\n\n\n@dataclass\nclass BenchmarkArgs:\n # Model parameters (same as train_tokenizer.py)\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""/home/franz.srambical/jafar/data_arrayrecord/dummy""\n \n # Tokenizer parameters\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n dtype: jnp.dtype = jnp.bfloat16\n param_dtype: jnp.dtype = jnp.float32\n use_flash_attention: bool = True\n \n # Benchmark parameters\n batch_sizes: List[int] = field(default_factory=lambda: [8*1, 9*1, 10*1, 11*1, 12*1, 13*1, 14*1, 15*1])\n warmup_steps: int = 10 # Steps to warm up before measuring\n measure_steps: int = 30 # Steps to measure\n num_iterations: int = 10 # Number of iterations to average over\n output_file: str = ""batch_size_benchmark_tokenizer_flash_attention.json""\n use_mock_dataloader: bool = False # Use mock dataloader instead of real one\n \n # VQ parameters\n vq_beta: float = 0.25\n\n\nargs = tyro.cli(BenchmarkArgs)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n inputs[""videos""] = inputs[""videos""].astype(args.dtype) / 255.0\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n return loss, outputs[""recon""]\n\n\[email protected]\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, recon), grads = grad_fn(state.params, state, inputs)\n state = 
state.apply_gradients(grads=grads)\n return state, loss, recon\n\n\ndef benchmark_batch_size(batch_size: int, tokenizer, train_state, mesh, videos_sharding) -> Optional[Dict[str, Any]]:\n """"""Benchmark a specific batch size and return throughput metrics.""""""\n print(f""\nBenchmarking batch size: {batch_size}"")\n \n num_devices = jax.device_count()\n per_device_batch_size = batch_size // num_devices\n \n # Create dataloader for this batch size\n if args.use_mock_dataloader:\n print("" Using mock dataloader (instant dummy data)"")\n image_shape = (args.image_height, args.image_width, args.image_channels)\n mock_dataloader = create_mock_dataloader(\n batch_size, args.seq_len, args.image_height, args.image_width, args.image_channels, num_devices\n )\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, np.array(elem)) for elem in mock_dataloader)\n else:\n print("" Using real dataloader"")\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n image_shape = (args.image_height, args.image_width, args.image_channels)\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in grain_iterator)\n \n rng = jax.random.PRNGKey(args.seed)\n step_times = []\n \n # Warmup phase\n print(f"" Warming up for {args.warmup_steps} steps..."")\n for step in range(args.warmup_steps):\n try:\n videos = next(dataloader)\n rng, _rng, _rng_dropout = jax.random.split(rng, 3)\n inputs = dict(videos=videos, rng=_rng, dropout_rng=_rng_dropout)\n train_state, loss, recon = train_step(train_state, inputs)\n jax.block_until_ready(train_state)\n except StopIteration:\n print(f"" Warning: Dataloader exhausted during warmup at step {step}"")\n break\n \n # Measurement phase\n print(f"" Measuring for {args.measure_steps} steps..."")\n start_time = time.time()\n for step in range(args.measure_steps):\n try:\n videos = next(dataloader)\n rng, _rng, _rng_dropout = jax.random.split(rng, 3)\n inputs = dict(videos=videos, rng=_rng, dropout_rng=_rng_dropout)\n train_state, loss, recon = train_step(train_state, inputs)\n jax.block_until_ready(train_state)\n \n if step > 0: # Skip first step as it might include compilation time\n step_times.append(time.time() - start_time)\n start_time = time.time()\n \n except StopIteration:\n print(f"" Warning: Dataloader exhausted during measurement at step {step}"")\n break\n \n if len(step_times) < 5:\n print(f"" Warning: Only {len(step_times)} step times measured, results may be unreliable"")\n return None\n \n # Calculate metrics\n avg_step_time = np.mean(step_times)\n std_step_time = np.std(step_times)\n throughput = batch_size / avg_step_time\n \n print(f"" Results: avg_step_time={avg_step_time:.4f}s ± {std_step_time:.4f}s, throughput={throughput:.2f} samples/s"")\n \n return {\n ""batch_size"": batch_size,\n ""avg_step_time"": avg_step_time,\n ""std_step_time"": std_step_time,\n ""throughput"": throughput,\n ""num_measurements"": len(step_times),\n ""dataloader_type"": ""mock"" if args.use_mock_dataloader else ""real""\n }\n\n\ndef main():\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices 
found."")\n print(f""Running on {num_devices} devices."")\n\n # Check if all batch sizes are divisible by number of devices\n for batch_size in args.batch_sizes:\n if batch_size % num_devices != 0:\n raise ValueError(\n f""Batch size {batch_size} must be divisible by number of devices {num_devices}.""\n )\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n )\n \n # Initialize with smallest batch size for model setup\n min_batch_size = min(args.batch_sizes)\n per_device_batch_size_for_init = min_batch_size // num_devices\n \n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=args.dtype,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n param_counts = count_parameters_by_component(init_params)\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(0.0, 3e-4, 1000, 10000)\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4, mu_dtype=args.dtype)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # Setup sharding\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # Run benchmarks\n results = []\n for batch_size in args.batch_sizes:\n result = benchmark_batch_size(batch_size, tokenizer, train_state, mesh, videos_sharding)\n if result is not None:\n results.append(result)\n \n # Save results\n benchmark_data = {\n ""num_devices"": num_devices,\n ""param_counts"": param_counts,\n ""results"": results\n }\n \n with open(args.output_file, 'w') as f:\n json.dump(benchmark_data, f, indent=2)\n \n print(f""\nBenchmark results saved to {args.output_file}"")\n \n # Print summary\n print(""\nBatch Size vs Throughput Summary:"")\n print(""Batch Size | Avg Step Time (s) | Throughput (samples/s)"")\n print(""-"" * 55)\n for result in results:\n print(f""{result['batch_size']:9d} | {result['avg_step_time']:15.4f} | {result['throughput']:20.2f}"")\n\n\nif __name__ == ""__main__"":\n main() ",python,tab
|
6 |
+
5,2163,"benchmark_batch_sizes_tokenizer.py",1993,0,"",python,selection_mouse
|
7 |
+
6,2168,"benchmark_batch_sizes_tokenizer.py",1992,0,"",python,selection_command
|
8 |
+
7,3127,"benchmark_batch_sizes_tokenizer.py",0,0,"",python,selection_command
|
9 |
+
8,4003,"benchmark_batch_sizes_tokenizer.py",0,10804,"from dataclasses import dataclass, field\nimport os\nimport time\nimport json\nfrom typing import List, Dict, Any, Optional\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport grain\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\nfrom utils.parameter_utils import count_parameters_by_component\n\n\ndef create_mock_dataloader(batch_size: int, seq_len: int, image_height: int, image_width: int, image_channels: int, num_devices: int):\n """"""\n Create a mock dataloader that generates instant dummy data.\n This eliminates IO bottlenecks to test if you're memory-bound.\n """"""\n per_device_batch_size = batch_size // num_devices\n \n def mock_dataloader():\n """"""Generator that yields dummy video data instantly.""""""\n while True:\n # Generate random dummy data with the correct shape\n # Using jax.random for consistency with JAX\n dummy_videos = jax.random.randint(\n jax.random.PRNGKey(0), # Fixed key for deterministic but random data\n (per_device_batch_size, seq_len, image_height, image_width, image_channels),\n minval=0,\n maxval=255,\n dtype=jnp.uint8\n )\n yield dummy_videos\n \n return mock_dataloader()\n\n\n@dataclass\nclass BenchmarkArgs:\n # Model parameters (same as train_tokenizer.py)\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""/home/franz.srambical/jafar/data_arrayrecord/dummy""\n \n # Tokenizer parameters\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n dtype: jnp.dtype = jnp.bfloat16\n param_dtype: jnp.dtype = jnp.float32\n use_flash_attention: bool = True\n \n # Benchmark parameters\n batch_sizes: List[int] = field(default_factory=lambda: [8*1, 9*1, 10*1, 11*1, 12*1, 13*1, 14*1, 15*1])\n warmup_steps: int = 10 # Steps to warm up before measuring\n measure_steps: int = 30 # Steps to measure\n num_iterations: int = 10 # Number of iterations to average over\n output_file: str = ""batch_size_benchmark_tokenizer_flash_attention.json""\n use_mock_dataloader: bool = False # Use mock dataloader instead of real one\n \n # VQ parameters\n vq_beta: float = 0.25\n\n\nargs = tyro.cli(BenchmarkArgs)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n inputs[""videos""] = inputs[""videos""].astype(args.dtype) / 255.0\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n return loss, outputs[""recon""]\n\n\[email protected]\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, recon), grads = grad_fn(state.params, state, inputs)\n state = 
state.apply_gradients(grads=grads)\n return state, loss, recon\n\n\ndef benchmark_batch_size(batch_size: int, tokenizer, train_state, mesh, videos_sharding) -> Optional[Dict[str, Any]]:\n """"""Benchmark a specific batch size and return throughput metrics.""""""\n print(f""\nBenchmarking batch size: {batch_size}"")\n \n num_devices = jax.device_count()\n per_device_batch_size = batch_size // num_devices\n \n # Create dataloader for this batch size\n if args.use_mock_dataloader:\n print("" Using mock dataloader (instant dummy data)"")\n image_shape = (args.image_height, args.image_width, args.image_channels)\n mock_dataloader = create_mock_dataloader(\n batch_size, args.seq_len, args.image_height, args.image_width, args.image_channels, num_devices\n )\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, np.array(elem)) for elem in mock_dataloader)\n else:\n print("" Using real dataloader"")\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n image_shape = (args.image_height, args.image_width, args.image_channels)\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in grain_iterator)\n \n rng = jax.random.PRNGKey(args.seed)\n step_times = []\n \n # Warmup phase\n print(f"" Warming up for {args.warmup_steps} steps..."")\n for step in range(args.warmup_steps):\n try:\n videos = next(dataloader)\n rng, _rng, _rng_dropout = jax.random.split(rng, 3)\n inputs = dict(videos=videos, rng=_rng, dropout_rng=_rng_dropout)\n train_state, loss, recon = train_step(train_state, inputs)\n jax.block_until_ready(train_state)\n except StopIteration:\n print(f"" Warning: Dataloader exhausted during warmup at step {step}"")\n break\n \n # Measurement phase\n print(f"" Measuring for {args.measure_steps} steps..."")\n start_time = time.time()\n for step in range(args.measure_steps):\n try:\n videos = next(dataloader)\n rng, _rng, _rng_dropout = jax.random.split(rng, 3)\n inputs = dict(videos=videos, rng=_rng, dropout_rng=_rng_dropout)\n train_state, loss, recon = train_step(train_state, inputs)\n jax.block_until_ready(train_state)\n \n if step > 0: # Skip first step as it might include compilation time\n step_times.append(time.time() - start_time)\n start_time = time.time()\n \n except StopIteration:\n print(f"" Warning: Dataloader exhausted during measurement at step {step}"")\n break\n \n if len(step_times) < 5:\n print(f"" Warning: Only {len(step_times)} step times measured, results may be unreliable"")\n return None\n \n # Calculate metrics\n avg_step_time = np.mean(step_times)\n std_step_time = np.std(step_times)\n throughput = batch_size / avg_step_time\n \n print(f"" Results: avg_step_time={avg_step_time:.4f}s ± {std_step_time:.4f}s, throughput={throughput:.2f} samples/s"")\n \n return {\n ""batch_size"": batch_size,\n ""avg_step_time"": avg_step_time,\n ""std_step_time"": std_step_time,\n ""throughput"": throughput,\n ""num_measurements"": len(step_times),\n ""dataloader_type"": ""mock"" if args.use_mock_dataloader else ""real""\n }\n\n\ndef main():\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices 
found."")\n print(f""Running on {num_devices} devices."")\n\n # Check if all batch sizes are divisible by number of devices\n for batch_size in args.batch_sizes:\n if batch_size % num_devices != 0:\n raise ValueError(\n f""Batch size {batch_size} must be divisible by number of devices {num_devices}.""\n )\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n )\n \n # Initialize with smallest batch size for model setup\n min_batch_size = min(args.batch_sizes)\n per_device_batch_size_for_init = min_batch_size // num_devices\n \n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=args.dtype,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n param_counts = count_parameters_by_component(init_params)\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(0.0, 3e-4, 1000, 10000)\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4, mu_dtype=args.dtype)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # Setup sharding\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # Run benchmarks\n results = []\n for batch_size in args.batch_sizes:\n result = benchmark_batch_size(batch_size, tokenizer, train_state, mesh, videos_sharding)\n if result is not None:\n results.append(result)\n \n # Save results\n benchmark_data = {\n ""num_devices"": num_devices,\n ""param_counts"": param_counts,\n ""results"": results\n }\n \n with open(args.output_file, 'w') as f:\n json.dump(benchmark_data, f, indent=2)\n \n print(f""\nBenchmark results saved to {args.output_file}"")\n \n # Print summary\n print(""\nBatch Size vs Throughput Summary:"")\n print(""Batch Size | Avg Step Time (s) | Throughput (samples/s)"")\n print(""-"" * 55)\n for result in results:\n print(f""{result['batch_size']:9d} | {result['avg_step_time']:15.4f} | {result['throughput']:20.2f}"")\n\n\nif __name__ == ""__main__"":\n main() ",python,selection_command
|
10 |
+
9,4181,"benchmark_batch_sizes_tokenizer.py",10793,0,"",python,selection_command
|
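The benchmark script recorded in this file times train_step with a warmup phase (to absorb JIT compilation) followed by measured steps, and calls jax.block_until_ready so that JAX's asynchronous dispatch does not make steps look instantaneous. Distilled into a standalone sketch; function and argument names here are illustrative, not from the script:

import time
import jax

def measure_throughput(step_fn, batches, batch_size, warmup=10, measure=30):
    # Warmup: absorbs compilation and cache effects before timing.
    for _ in range(warmup):
        jax.block_until_ready(step_fn(next(batches)))
    # Measurement: wall-clock time per step, synchronized on device completion.
    times = []
    for _ in range(measure):
        t0 = time.time()
        jax.block_until_ready(step_fn(next(batches)))
        times.append(time.time() - t0)
    avg_step_time = sum(times) / len(times)
    return batch_size / avg_step_time  # samples per second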
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-cc98355f-a627-4bb1-8368-cc01d32ba9141754309337509-2025_08_04-14.09.04.547/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-cdede756-87c5-47af-85f8-bc9bf1c41bac1750785125700-2025_06_24-22.05.07.988/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-d5ccbc8d-af3a-4c10-ab81-61e24d56bad31754400813118-2025_08_05-15.33.50.504/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-d9c3341f-490f-4227-8214-c68384e56a1f1753945239481-2025_07_31-09.00.53.845/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-dca3235c-6967-4aac-8b57-97fa2f10e0ad1753259782320-2025_07_23-10.36.33.63/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-df82217c-210f-4b7f-897f-6f53642cf7c81754033699441-2025_08_01-09.35.08.848/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-e7d20f74-415c-47d0-ad95-3f6da31696d51753194904459-2025_07_22-16.35.52.74/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-f0382786-979c-4a6d-8e9b-f5977f18eb4f1753726151187-2025_08_02-06.58.58.573/source.csv
ADDED
@@ -0,0 +1,4 @@
1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
2 |
+
1,1,"sample.py",0,0,"from dataclasses import dataclass\nimport time\nimport os\nimport optax\n\nimport dm_pix as pix\nimport einops\nimport jax\nimport jax.numpy as jnp\nimport flax.linen as nn\nimport numpy as np\nimport orbax.checkpoint as ocp\nfrom PIL import Image, ImageDraw\nimport tyro\nfrom flax import nnx\n\nfrom genie import Genie\nfrom utils.dataloader import get_dataloader\n\n\n@dataclass\nclass Args:\n # Experiment\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data/coinrun_episodes""\n checkpoint: str = """"\n # Sampling\n batch_size: int = 1\n maskgit_steps: int = 25\n temperature: float = 1.0\n sample_argmax: bool = True\n start_frame: int = 0\n # Tokenizer checkpoint\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n # LAM checkpoint\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n # Dynamics checkpoint\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n\n\nargs = tyro.cli(Args)\n\nif __name__ == ""__main__"":\n """"""\n Dimension keys:\n B: batch size\n T: number of input (conditioning) frames\n N: number of patches per frame\n S: sequence length\n H: height\n W: width\n E: B * (S - 1)\n """"""\n jax.distributed.initialize()\n\n rng = jax.random.key(args.seed)\n\n # --- Load Genie checkpoint ---\n rngs = nnx.Rngs(rng)\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=False,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs,\n )\n\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n checkpoint_options = ocp.CheckpointManagerOptions(\n step_format_fixed_length=6,\n )\n checkpoint_manager = ocp.CheckpointManager(\n args.checkpoint,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n dummy_tx = optax.adamw(\n learning_rate=optax.linear_schedule(0.0001, 0.0001, 10000),\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n dummy_optimizer = nnx.Optimizer(genie, dummy_tx)\n\n abstract_optimizer = nnx.eval_shape(lambda: dummy_optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = 
checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n ),\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(dummy_optimizer, restored_optimizer_state)\n\n # --- Define sampling function ---\n def _sampling_fn(model: Genie, batch: dict) -> jax.Array:\n """"""Runs Genie.sample with pre-defined generation hyper-parameters.""""""\n return model.sample(\n batch,\n args.seq_len,\n args.maskgit_steps,\n args.temperature,\n args.sample_argmax,\n )\n\n # --- Define autoregressive sampling loop ---\n @nnx.jit\n def _autoreg_sample(rng, video_batch_BSHWC, action_batch_E):\n input_video_BTHWC = video_batch_BSHWC[:, : args.start_frame + 1]\n rng, _rng = jax.random.split(rng)\n batch = dict(videos=input_video_BTHWC, latent_actions=action_batch_E, rng=_rng)\n generated_vid_BSHWC = _sampling_fn(genie, batch)\n return generated_vid_BSHWC\n\n # --- Get video + latent actions ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n args.batch_size,\n args.image_height,\n args.image_width,\n args.image_channels,\n # We don't use workers in order to avoid grain shutdown issues (https://github.com/google/grain/issues/398)\n num_workers=0,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n dataloader = iter(dataloader)\n video_batch_BSHWC = next(dataloader)\n gt_video = jnp.asarray(video_batch_BSHWC, dtype=jnp.float32) / 255.0\n video_batch_BSHWC = gt_video.astype(args.dtype)\n # Get latent actions for all videos in the batch\n batch = dict(videos=video_batch_BSHWC)\n action_batch_E = genie.vq_encode(batch, training=False)\n\n # --- Sample + evaluate video ---\n recon_video_BSHWC = _autoreg_sample(rng, video_batch_BSHWC, action_batch_E)\n recon_video_BSHWC = recon_video_BSHWC.astype(jnp.float32)\n gt = gt_video[:, : recon_video_BSHWC.shape[1]].clip(0, 1).reshape(-1, *gt_video.shape[2:])\n recon = recon_video_BSHWC.clip(0, 1).reshape(-1, *recon_video_BSHWC.shape[2:])\n ssim = jnp.asarray(\n pix.ssim(gt[:, args.start_frame + 1 :], recon[:, args.start_frame + 1 :])\n ).mean()\n print(f""SSIM: {ssim}"")\n\n # --- Construct video ---\n true_videos = (gt_video * 255).astype(np.uint8)\n pred_videos = (recon_video_BSHWC * 255).astype(np.uint8)\n video_comparison = np.zeros((2, *recon_video_BSHWC.shape), dtype=np.uint8)\n video_comparison[0] = true_videos[:, : args.seq_len]\n video_comparison[1] = pred_videos\n frames = einops.rearrange(video_comparison, ""n b t h w c -> t (b h) (n w) c"")\n\n # --- Save video ---\n imgs = [Image.fromarray(img) for img in frames]\n # Write actions on each frame, on each row (i.e., for each video in the batch, on the GT row)\n B, S, _, _, _ = video_batch_BSHWC.shape\n action_batch_BSm11 = jnp.reshape(action_batch_E, (B, S-1, 1))\n for t, img in enumerate(imgs[1:]):\n d = ImageDraw.Draw(img)\n for row in range(action_batch_BSm11.shape[0]):\n action = action_batch_BSm11[row, t, 0]\n y_offset = row * video_batch_BSHWC.shape[2] + 2\n d.text((2, y_offset), f""{action}"", fill=255)\n imgs[0].save(\n f""generation_{time.time()}.gif"",\n save_all=True,\n append_images=imgs[1:],\n duration=250,\n loop=0,\n )\n",python,tab
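A note on the restore pattern in the row above: sample.py never trains, yet it still constructs a dummy optimizer so that Orbax has a concrete PyTree structure to restore the checkpoint into; nnx.eval_shape produces that structure without allocating real weights. A minimal sketch of the same idea, with a hypothetical TinyModel standing in for Genie (the registry and Composite plumbing mirror the recorded code; the model, path, and learning rate are illustrative assumptions):

    import optax
    import orbax.checkpoint as ocp
    from flax import nnx

    class TinyModel(nnx.Module):  # hypothetical stand-in for Genie
        def __init__(self, rngs: nnx.Rngs):
            self.linear = nnx.Linear(4, 4, rngs=rngs)

    model = TinyModel(rngs=nnx.Rngs(0))
    # Dummy optimizer: only its state *structure* matters for restoration.
    optimizer = nnx.Optimizer(model, optax.adamw(learning_rate=1e-4))

    # Register PyTree save/restore handlers under the "model_state" item
    # name, as in the recorded script.
    registry = ocp.handlers.DefaultCheckpointHandlerRegistry()
    registry.add("model_state", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler)
    registry.add("model_state", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler)
    manager = ocp.CheckpointManager("/tmp/ckpt", handler_registry=registry)

    # eval_shape yields an abstract (shape/dtype-only) optimizer; its state
    # tree tells Orbax what structure to restore into.
    abstract_state = nnx.state(nnx.eval_shape(lambda: optimizer))
    restored = manager.restore(
        manager.latest_step(),
        args=ocp.args.Composite(model_state=ocp.args.PyTreeRestore(abstract_state)),
    )
    nnx.update(optimizer, restored["model_state"])  # write restored values in place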
2,2823,"sample.py",5162,0,"",python,selection_mouse
3,2829,"sample.py",5161,0,"",python,selection_command
1f15334ab7e6820c9fda17c961659882ef9853cc80f7356b9a9b22f286fd7389/crowd-code-f23be3cf-4da5-450c-91f1-df9de045459c1752656830830-2025_07_16-11.08.01.978/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-000d5684-56eb-441c-a6df-7ac4df8ff5c71752846982966-2025_07_18-15.57.40.939/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-005af936-919b-4c44-b3af-2ed3459192121752831699789-2025_07_18-11.43.38.119/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-05016444-b54b-4934-b340-97e6db49021a1753717457401-2025_07_28-17.45.12.572/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-08c8f2de-5ecb-431d-b59f-5255a2b202341751712939209-2025_07_05-12.56.07.377/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-08ecc06e-32e0-4a9b-b785-5fb84b3f32de1751802304467-2025_07_06-13.46.47.722/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-12b522dd-8518-4c62-b207-ca1ed4ce90571752782954186-2025_07_17-22.10.14.626/source.csv
ADDED
@@ -0,0 +1,60 @@
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
1,4,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\n\nimport einops\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\n\nfrom genie import Genie, restore_genie_components\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n warmup_steps: int = 5000\n lr_schedule : str = ""wsd"" # supported options: wsd, cos\n # Tokenizer\n tokenizer_dim: int = 512\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 8\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 8\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n # Dynamics\n dyna_dim: int = 512\n dyna_num_blocks: int = 12\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n use_maskgit: bool = False\n param_dtype: jnp.dtype = jnp.float32\n dtype: jnp.dtype = jnp.bfloat16\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(params, state, inputs):\n """"""Compute masked dynamics loss""""""\n inputs[""videos""] = inputs[""videos""].astype(args.dtype) / 255.0\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n logits = outputs[""token_logits""]\n targets = outputs[""video_tokens""]\n\n # if not args.use_maskgit:\n # logits = outputs[""token_logits""][:, :, :-1]\n # targets = outputs[""video_tokens""][:, :, 1:]\n # mask = outputs[""mask""][:, :, 1:] \n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n logits, targets\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = logits.argmax(-1) == targets\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(logits)\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean() # type: ignore\n ssim = pix.ssim(gt, recon).mean() # type: ignore\n _, index_counts_lam = jnp.unique_counts(\n 
jnp.ravel(outputs[""lam_indices""]), size=args.num_latent_actions, fill_value=0\n )\n _, index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]), size=args.num_patch_latents, fill_value=0\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=logits.max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\[email protected]\ndef train_step(state, inputs):\n """"""Update state and compute metrics""""""\n grad_fn = jax.value_and_grad(dynamics_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n genie = Genie(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dyna_dim=args.dyna_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n use_maskgit=args.use_maskgit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n dummy_inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=args.dtype,\n ),\n action=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len), dtype=args.dtype\n ),\n mask_rng=_rng,\n )\n rng, _rng = jax.random.split(rng)\n init_params = genie.init(_rng, dummy_inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # 
--- Initialize optimizer ---\n lr_schedule = get_lr_schedule(args.lr_schedule, \n args.init_lr, \n args.max_lr, \n args.decay_end, \n args.num_steps, \n args.warmup_steps, \n args.wsd_decay_steps)\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4, mu_dtype=args.dtype)\n train_state = TrainState.create(apply_fn=genie.apply, params=init_params, tx=tx)\n\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.StandardSave, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.StandardRestore, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointSave, grain.checkpoint.CheckpointHandler) # type: ignore\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler) # type: ignore\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n # Restore full dynamics model\n abstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, train_state\n )\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator),\n ),\n )\n train_state = restored[""model_state""]\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n train_state = restore_genie_components(\n train_state, replicated_sharding, grain_iterator, dummy_inputs, rng, args\n )\n\n # --- TRAIN LOOP ---\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in grain_iterator) # type: ignore\n while step < args.num_steps:\n # for videos in dataloader:\n videos = np.load(""overfit_dir/corner_8repl.npy"")\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n while True:\n # --- Train step ---\n rng, _rng, _rng_dropout, _rng_mask = 
jax.random.split(rng, 4)\n\n inputs = dict(\n videos=videos,\n rng=_rng,\n dropout_rng=_rng_dropout,\n mask_rng=_rng_mask,\n )\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0].astype(jnp.float32) #/ 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.StandardSave(train_state),\n dataloader_state=grain.checkpoint.CheckpointSave(\n grain_iterator\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab
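The heart of dynamics_loss_fn in the row above is a masked token-prediction objective: cross-entropy is computed at every position but averaged only where the mask is set, and accuracy is gated by the same mask. A minimal sketch with dummy shapes (all sizes here are illustrative, not the recording's):

    import jax
    import jax.numpy as jnp
    import optax

    B, T, N, V = 2, 16, 360, 1024  # batch, frames, patches per frame, vocab size
    key_logits, key_mask = jax.random.split(jax.random.PRNGKey(0))
    logits = jax.random.normal(key_logits, (B, T, N, V))   # predicted token logits
    targets = jnp.zeros((B, T, N), dtype=jnp.int32)        # ground-truth token ids
    mask = jax.random.bernoulli(key_mask, 0.5, (B, T, N))  # True where a token was masked

    # Per-position cross-entropy, averaged over masked positions only.
    ce = optax.softmax_cross_entropy_with_integer_labels(logits, targets)
    ce_loss = (mask * ce).sum() / mask.sum()

    # Accuracy restricted to the same masked positions.
    acc = (mask * (logits.argmax(-1) == targets)).sum() / mask.sum()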
2,590,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"10:10:14 PM [info] Activating crowd-code\n10:10:14 PM [info] Recording started\n10:10:14 PM [info] Initializing git provider using file system watchers...\n10:10:14 PM [info] Git repository found\n10:10:14 PM [info] Git provider initialized successfully\n10:10:14 PM [info] Initial git state: [object Object]\n",Log,tab
3,3709,"TERMINAL",0,0,"/bin/python3 /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt",,terminal_command
4,3743,"TERMINAL",0,0,"]633;E;2025-07-17 22:10:18 /bin/python3 /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/printEnvVariablesToFile.py /hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash/envVars.txt;541e3580-6a0f-4f0b-bb4c-198841a406f7]633;C",,terminal_output
5,3850,"TERMINAL",0,0,"]0;tum_cte0515@hkn1991:/hkfs/home/project/hk-project-p0023960/tum_cte0515/.cursor-server/extensions/ms-python.python-2024.12.3-linux-x64/python_files/deactivate/bash]633;D;0",,terminal_output
6,21978,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",0,0,"",python,tab
7,21982,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2750,0,"",python,selection_mouse
8,25341,"TERMINAL",0,0,"queue",,terminal_command
9,25354,"TERMINAL",0,0,"]633;E;2025-07-17 22:10:39 queue;8a8d6fe6-a600-49ab-ad7b-e3418768748b]633;C",,terminal_output
10,25432,"TERMINAL",0,0,"[?1049h[22;0;0t[1;18r(B[m[4l[?7h[H[2JEvery 1.0s: squeue --me[1;110Hhkn1991.localdomain: Thu Jul 17 22:10:39 2025[3;14HJOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)[4;12H3353884 accelerat interact tum_cte0 R 7:01:56\t 2 hkn[0508,0517][18;154H",,terminal_output
11,26402,"TERMINAL",0,0,"[1;148H40[4;60H7[18;154H",,terminal_output
12,27450,"TERMINAL",0,0,"[1;149H1[4;60H8[18;154H",,terminal_output
13,28557,"TERMINAL",0,0,"[1;149H2[4;60H9[18;154H",,terminal_output
14,29633,"TERMINAL",0,0,"[1;149H3[4;57H2:01[18;154H",,terminal_output
15,29809,"TERMINAL",0,0,"[18;1H[?1049l[23;0;0t\r[?1l>]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output
16,34077,"TERMINAL",0,0,"scancel 3353884",,terminal_command
17,34117,"TERMINAL",0,0,"]633;E;2025-07-17 22:10:48 scancel 3353884;8a8d6fe6-a600-49ab-ad7b-e3418768748b]633;C]0;tum_cte0515@hkn1991:~/Projects/jafar]633;D;0",,terminal_output
18,100445,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2966,0,"",python,selection_mouse
19,102391,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2912,0,"",python,selection_command
20,102556,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2858,0,"",python,selection_command
21,102702,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2827,0,"",python,selection_command
22,102897,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2858,0,"",python,selection_command
23,103038,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2912,0,"",python,selection_command
24,103183,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2966,0,"",python,selection_command
25,103313,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2912,0,"",python,selection_command
26,103461,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2858,0,"",python,selection_command
27,103646,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2827,0,"",python,selection_command
28,103749,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2858,0,"",python,selection_command
29,103907,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2912,0,"",python,selection_command
30,104034,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2966,0,"",python,selection_command
31,104150,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",3010,0,"",python,selection_command
32,104180,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2966,0,"",python,selection_command
33,104347,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2912,0,"",python,selection_command
34,104508,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2858,0,"",python,selection_command
35,104634,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2827,0,"",python,selection_command
36,104768,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2800,0,"",python,selection_command
37,105312,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2801,0,"",python,selection_command
38,105809,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2802,0,"",python,selection_command
39,106322,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2803,0,"",python,selection_command
40,106363,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2804,0,"",python,selection_command
41,106416,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2805,0,"",python,selection_command
42,106454,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2806,0,"",python,selection_command
43,106498,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2807,0,"",python,selection_command
44,106499,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2808,0,"",python,selection_command
45,106500,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2809,0,"",python,selection_command
46,106539,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2810,0,"",python,selection_command
47,106557,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2811,0,"",python,selection_command
48,106584,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2812,0,"",python,selection_command
49,106634,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2843,0,"",python,selection_command
50,107209,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2897,0,"",python,selection_command
51,107343,"/hkfs/home/project/hk-project-p0023960/tum_cte0515/Projects/jafar/train_dynamics.py",2951,0,"",python,selection_command
52,109636,"slurm/jobs/mihir/horeka/yolo-runs/sampling.sh",0,0,"\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\n# source .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\ndynamics_ckpt_dir=$1\necho $dynamics_ckpt_dir\n\nenv | grep SLURM\n\npython sample.py \\n --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=128 \\n --dyna_num_blocks=2 \\n --dyna_num_heads=4 \\n --seq_len=2 \\n --data_dir $array_records_dir\n\n",shellscript,tab
53,114762,"TERMINAL",0,0,"",,terminal_focus
54,121002,"TERMINAL",0,0,"salloc --time=10:00:00 --partition=accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5",,terminal_command
55,121089,"TERMINAL",0,0,"]633;E;2025-07-17 22:12:15 salloc --time=10:00:00 --partition=accelerated --nodes=1 --ntasks-per-node=1 --gres=gpu:1 --cpus-per-task=5;2fac4da8-d4f0-4d83-a6ce-f6776ed5ed51]633;Csalloc: Granted job allocation 3355871\r\n",,terminal_output
56,121223,"TERMINAL",0,0,"salloc: Waiting for resource configuration\r\n",,terminal_output
57,148351,"TERMINAL",0,0,"salloc: Nodes hkn0508 are ready for job\r\n",,terminal_output
58,149414,"TERMINAL",0,0,"]0;tum_cte0515@hkn0508:~/Projects/jafar[?2004h[tum_cte0515@hkn0508 jafar]$ ",,terminal_output
59,165129,"TERMINAL",0,0,"\r[K[tum_cte0515@hkn0508 jafar]$ ",,terminal_output
927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-12e65989-9ed0-4f2d-9e6c-43231178a0e21751536307695-2025_07_03-11.53.05.72/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-1dc733b8-f415-4be5-b7dd-dc5953da5bb91753973887840-2025_07_31-16.58.50.401/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-21374577-8901-4de5-b364-53d577190c6a1752060363988-2025_07_09-13.26.43.890/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-25569aaa-6e77-4ce2-b9b6-8ae8c33420051753180192494-2025_07_22-12.30.11.399/source.csv
ADDED
@@ -0,0 +1,16 @@
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
1,5,"slurm/jobs/mihir/horeka/yolo-runs/sampling.sh",0,0,"\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\n# source .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/$job_name/$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\ntokenizer_ckpt_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4\ndynamics_ckpt_dir=$1\necho $dynamics_ckpt_dir\n\nenv | grep SLURM\n\nsrun python sample.py \\n --checkpoint $dynamics_ckpt_dir \\n --dyna_dim=128 \\n --dyna_num_blocks=2 \\n --dyna_num_heads=4 \\n --seq_len=16 \\n --start_frame=10 \\n --data_dir $array_records_dir\n\n# srun python sample.py \\n # --checkpoint $dynamics_ckpt_dir \\n # --start_frame=0 \\n # --batch_size=12 \\n # --seq_len=2 \\n # --data_dir $array_records_dir\n",shellscript,tab
2,446,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"12:30:11 PM [info] Activating crowd-code\n12:30:11 PM [info] Recording started\n12:30:11 PM [info] Initializing git provider using file system watchers...\n12:30:11 PM [info] Git repository found\n12:30:11 PM [info] Git provider initialized successfully\n12:30:11 PM [info] Initial git state: [object Object]\n",Log,tab
3,1898,"slurm/jobs/mihir/horeka/yolo-runs/sampling.sh",0,0,"",shellscript,tab
4,8030,"TERMINAL",0,0,"cd checkpoints/big-runs/",,terminal_command
5,8072,"TERMINAL",0,0,"]633;E;2025-07-22 12:30:19 cd checkpoints/big-runs/;5fcdc89e-3b1c-4d05-a6d2-6f1ce0ba6ffb]633;C]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs]633;D;0",,terminal_output
6,10557,"TERMINAL",0,0,"cd tokenizer-lr-scaling/",,terminal_command
7,12047,"TERMINAL",0,0,"ls",,terminal_command
8,12130,"TERMINAL",0,0,"]633;E;2025-07-22 12:30:23 ls;5fcdc89e-3b1c-4d05-a6d2-6f1ce0ba6ffb]633;C[0m[01;34mtrain_tokenizer_lr_sweep_1e-4[0m [01;34mtrain_tokenizer_lr_sweep_5e-5[0m\r\n[01;34mtrain_tokenizer_lr_sweep_1e-4_8nodes[0m [01;34mtrain_tokenizer_lr_sweep_5e-5_8nodes[0m\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling]633;D;0",,terminal_output
9,16772,"TERMINAL",0,0,"cd train_tokenizer_lr_sweep_1e-4",,terminal_command
10,16797,"TERMINAL",0,0,"]633;E;2025-07-22 12:30:28 cd train_tokenizer_lr_sweep_1e-4;5fcdc89e-3b1c-4d05-a6d2-6f1ce0ba6ffb]633;C]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4]633;D;0",,terminal_output
11,17848,"TERMINAL",0,0,"ls",,terminal_command
12,17849,"TERMINAL",0,0,"]633;E;2025-07-22 12:30:28 ls;5fcdc89e-3b1c-4d05-a6d2-6f1ce0ba6ffb]633;C",,terminal_output
13,17862,"TERMINAL",0,0,"[0m[01;34m020000[0m [01;34m060000[0m [01;34m100000[0m [01;34m140000[0m [01;34m145000[0m [01;31m146000.zip[0m\r\n[01;34m040000[0m [01;34m080000[0m [01;34m120000[0m [01;34m144000[0m [01;34m146000[0m\r\n]0;tum_cte0515@hkn1993:/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/big-runs/tokenizer-lr-scaling/train_tokenizer_lr_sweep_1e-4]633;D;0",,terminal_output
14,21855,"TERMINAL",0,0,"cursor .",,terminal_command
15,21897,"TERMINAL",0,0,"]633;E;2025-07-22 12:30:33 cursor .;5fcdc89e-3b1c-4d05-a6d2-6f1ce0ba6ffb]633;C",,terminal_output
927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-268e2d5f-0a66-4008-8495-15de70c8a2e51751028407664-2025_06_27-14.47.06.44/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-2d2437e1-caa5-4315-a7d9-4d9478073a161750944609503-2025_06_26-15.30.55.51/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-2f5e552b-d86c-4a34-a644-139d05fcf0731753100718217-2025_07_21-14.25.46.738/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-30efafeb-a59f-45ff-9626-651f3a2526631753351953527-2025_07_24-12.13.09.956/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-35296db0-5985-4518-8394-cca2184ed6c11751305990184-2025_06_30-19.53.48.49/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
927a8af5474e5654810c00ce2e09fd2de87d3e5722f33fa1090d867db114e403/crowd-code-3553d16e-f1c9-4e9c-9425-6b663caf1f311753957765078-2025_07_31-12.30.02.749/source.csv
ADDED
The diff for this file is too large to render.
See raw diff