Add files using upload-large-folder tool
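For context, the `upload-large-folder` tool named in this commit message is the resumable, multi-worker upload path in `huggingface_hub`. A minimal sketch of how such an upload is typically issued; the repo id and local folder below are placeholders, not values taken from this commit:

```python
from huggingface_hub import HfApi

# Placeholder repo id and folder path -- substitute the actual dataset repo
# and the local directory holding the crowd-code recording folders.
api = HfApi()
api.upload_large_folder(
    repo_id="your-org/your-crowd-code-dataset",
    repo_type="dataset",
    folder_path="./crowd-code-recordings",
)
```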
This view is limited to 50 files because the commit contains too many changes.
- 05d9d5da933137c5402a176a469b618685c7e9142aa8972616ca5cdf0f6e53d1/crowd-code-19062f93-c43f-40da-a8e7-aee672713bd11750887279735-2025_06_25-23.35.33.315/source.csv +0 -0
- 05d9d5da933137c5402a176a469b618685c7e9142aa8972616ca5cdf0f6e53d1/crowd-code-966f6435-b7a4-4869-9e11-2847af1349b81750947034236-2025_06_26-16.10.57.310/source.csv +320 -0
- 05d9d5da933137c5402a176a469b618685c7e9142aa8972616ca5cdf0f6e53d1/crowd-code-9ff54a43-2a59-41a8-96bc-f7e46d5244651750887279734-2025_06_25-23.36.32.560/source.csv +3 -0
- 05d9d5da933137c5402a176a469b618685c7e9142aa8972616ca5cdf0f6e53d1/crowd-code-aa8be5f9-c447-4faf-b9c6-7142909b3c591750719092446-2025_06_24-00.51.37.15/source.csv +6 -0
- 05d9d5da933137c5402a176a469b618685c7e9142aa8972616ca5cdf0f6e53d1/crowd-code-cb92c7b2-f6e4-4d49-91cb-88397630081c1750964172563-2025_06_26-20.56.24.104/source.csv +0 -0
- 05d9d5da933137c5402a176a469b618685c7e9142aa8972616ca5cdf0f6e53d1/crowd-code-cf821b01-6b29-41b7-8f83-b619919a05b21750839417809-2025_06_25-10.17.13.417/source.csv +0 -0
- 05d9d5da933137c5402a176a469b618685c7e9142aa8972616ca5cdf0f6e53d1/crowd-code-fe02aeb3-604a-4819-a48b-84d43ac5b72c1751037770876-2025_06_27-17.23.08.156/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-05830693-fec2-4daa-bf9a-df97d3f440b31752570818732-2025_07_15-11.14.44.738/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-062d67da-7ae5-4d1c-b9f1-a7d011641a591752908369905-2025_07_19-09.00.11.894/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-172d798b-bee8-455a-b904-9dd3fe6387d51754411154298-2025_08_05-18.25.56.221/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-1e500cb5-2752-4243-8002-15d1ebb40f691751798498690-2025_07_06-12.42.01.118/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-222fe98e-29ac-4b20-9a65-fe2e31f8eb701751128122769-2025_06_28-09.28.47.536/source.csv +29 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-26402469-72d0-4ef2-8121-ec0cf7ef56091752689884853-2025_07_16-20.18.54.768/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-280dfe9f-0f96-4f1d-a304-44e1237157a81754289117685-2025_08_04-08.32.51.461/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-2bb200ce-4bc8-4bc3-9354-29e24db5d38e1752063967983-2025_07_09-14.26.42.463/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-2c44fb8b-ed49-4624-90ac-19d20bbba3331752833225757-2025_07_18-12.07.47.556/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-2f4ff312-abac-4732-805a-ec6c245e532e1752745206273-2025_07_17-11.40.45.491/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-33e2e492-00ca-4b0f-8523-da20187ea5301751307587112-2025_06_30-20.21.41.519/source.csv +53 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-3d995f1f-4c62-4f98-bebc-a37f170682391753352025448-2025_07_24-12.14.24.225/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-3dde1b0c-c963-467e-aa73-fb6c54df3ae41751963426964-2025_07_08-10.30.57.271/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-3e0d9e2e-4ec4-4d1f-bec7-d4425f5cfe1a1751911970182-2025_07_07-20.13.17.83/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-3f1ee1c9-e4cc-4ce6-b748-f84ba0479b8d1752061696547-2025_07_09-13.48.48.273/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-40a031fc-4dc5-4408-a821-8344d9146d941752833855790-2025_07_18-12.18.25.998/source.csv +115 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-41b294b4-b89c-4c1d-8a02-14afc9168dc41753085667665-2025_07_21-10.15.04.628/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-4457e5d2-f5e8-4b15-95aa-bafa247369991751528947759-2025_07_03-09.50.10.663/source.csv +45 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-4624dad2-2cdd-4760-a1d4-90d8cf5390e11752824923646-2025_07_18-09.49.17.759/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-46ca2edf-d936-40bf-b5cb-2bace160abae1751617999883-2025_07_04-16.08.06.165/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-4719c5f9-1b15-4792-8afd-690761108bda1751617825355-2025_07_04-10.31.22.581/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-48fbb415-6db9-4d35-b548-561e828791bf1751383187013-2025_07_01-17.19.57.60/source.csv +4 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-4925dddc-46dd-4a2d-8d37-761ea748b28d1753197051515-2025_07_22-17.10.58.842/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-4c99cdc3-e81e-4e53-b728-37b9467466201751794872067-2025_07_06-11.41.50.869/source.csv +63 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-4d1a0deb-e1f5-442d-9c20-e0948c9dbbd21751438388691-2025_07_02-08.40.20.700/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-53035602-cd5a-4dad-bc79-2cb4d8d4f7681751162692203-2025_06_28-19.04.53.413/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-53473ec0-46ca-440e-9f03-b85aa9497ebf1751358924978-2025_07_01-10.36.06.808/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-53a6d9e8-1004-4f9b-86d8-e54e29f62e511751401053914-2025_07_01-22.18.23.766/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-5b1a6152-1602-4538-a4b1-6fa9507221151753212707189-2025_07_22-21.32.36.855/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-5e81a867-cde0-4985-b3ad-048a43f856df1751310142803-2025_06_30-12.02.24.285/source.csv +79 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-6614bc95-283e-4f09-b0e7-458aba09077f1754428379797-2025_08_05-23.14.08.79/source.csv +5 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-68bb0852-886e-4fc0-9144-b85c1ade71281752827220294-2025_07_18-10.27.35.446/source.csv +9 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-73fae8d1-0502-4ab4-bd59-8818555f36691753200603120-2025_07_22-18.10.30.380/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-76073275-4388-463f-8e12-ce34ee46fad51752495312029-2025_07_14-14.15.14.704/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-7879e034-f897-48e8-8481-1a87a73b0dc81752135543307-2025_07_10-10.19.09.565/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-79bdcdff-1889-496c-9bc9-830e887f70d81751447790479-2025_07_02-11.39.26.104/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-81dc70dc-8e01-48a6-9a00-9349b9f9a4171751541780271-2025_07_03-13.23.33.804/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-84bc9952-c4b0-4456-bdc2-984faf53684f1751163593750-2025_06_28-19.19.55.196/source.csv +10 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-88e0ea71-dd7c-4ca4-b50e-d557352226511751452791721-2025_07_02-12.40.17.73/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-8a6a12ae-3e93-40d9-8a49-8ed320340ef81751310132462-2025_06_30-21.03.13.314/source.csv +443 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-8e7b7877-c553-4d5c-a7c5-433adcd8112b1754287948136-2025_08_04-08.12.35.154/source.csv +0 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-97725341-21b1-49e4-a29b-2639a6d35d101753197054575-2025_07_22-17.11.28.789/source.csv +50 -0
- 1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-9837d221-0431-4205-9f56-6cdee8f0c0181752692224585-2025_07_16-20.57.34.901/source.csv +0 -0
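Each `source.csv` listed above shares the header `Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type`, visible in the rendered diffs below. A minimal sketch for inspecting one recording with pandas; the path is a placeholder and the column interpretations are inferred from the rows shown in this diff:

```python
import pandas as pd

# Placeholder path to one of the recordings listed above.
events = pd.read_csv("path/to/crowd-code-session/source.csv")

# Sequence orders the events; Time looks like elapsed milliseconds since the
# recording started; File is either an edited file path or "TERMINAL";
# RangeOffset/RangeLength locate an edit; Text carries the inserted text or
# terminal output; Type distinguishes event kinds such as tab,
# selection_mouse, terminal_command, terminal_output, and terminal_focus.
commands = events[events["Type"] == "terminal_command"]
print(commands[["Time", "Text"]].to_string(index=False))
```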
05d9d5da933137c5402a176a469b618685c7e9142aa8972616ca5cdf0f6e53d1/crowd-code-19062f93-c43f-40da-a8e7-aee672713bd11750887279735-2025_06_25-23.35.33.315/source.csv
ADDED
The diff for this file is too large to render.
05d9d5da933137c5402a176a469b618685c7e9142aa8972616ca5cdf0f6e53d1/crowd-code-966f6435-b7a4-4869-9e11-2847af1349b81750947034236-2025_06_26-16.10.57.310/source.csv
ADDED
@@ -0,0 +1,320 @@
1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
2 |
+
1,13,"slurm/dev/alfred/train_dyn_dev/train_dyn_single_batch.sh",0,0,"module unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv_jafar/bin/activate\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\n\nCHECKPOINT_DIR=$ws_dir/checkpoints/lam/$job_name_$slurm_job_id\nmkdir -p $CHECKPOINT_DIR\n\n\ndata_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/knoms_tfrecords_500_shards'\n\n\nlam_checkpoint='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/lam/3277318/lam_1750444032_10000'\ntokenizer_checkpoint='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/checkpoints/tokenizer/3273828/tokenizer_1750269881_15500'\n\n# srun python train_dynamics_single_batch.py \\nsrun python train_dynamics_single_batch.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --lam_checkpoint $lam_checkpoint \\n --tokenizer_checkpoint $tokenizer_checkpoint \\n --batch_size=12 \\n --log_checkpoint_interval=1000 \\n --log_image_interval=10 \\n --seed=0 \\n --min_lr=0.0000433 \\n --max_lr=0.0000433 \\n --log \\n --entity instant-uv \\n --data_dir $data_dir \\n --project jafar\n",shellscript,tab
|
3 |
+
2,1380,"slurm/dev/alfred/train_dyn_dev/train_dyn_single_batch.sh",1047,0,"",shellscript,selection_mouse
|
4 |
+
3,9538,"TERMINAL",0,0,"scancel 3296400",,terminal_command
|
5 |
+
4,9565,"TERMINAL",0,0,"]633;E;2025-06-26 16:11:06 scancel 3296400;7a5b3bb0-cefb-4a99-9c32-203d8b5f46a4]633;C]0;tum_ind3695@hkn1993:~/projects/jafar]633;D;0",,terminal_output
|
6 |
+
5,12102,"TERMINAL",0,0,"bash",,terminal_focus
|
7 |
+
6,15193,"TERMINAL",0,0,"salloc --account=hk-project-p0023960 --time=01:00:00 --partition=accelerated --nodes=1 --gres=gpu:1 --cpus-per-task=8 --ntasks-per-node=1",,terminal_command
|
8 |
+
7,15272,"TERMINAL",0,0,"]633;E;2025-06-26 16:11:12 salloc --account=hk-project-p0023960 --time=01:00:00 --partition=accelerated --nodes=1 --gres=gpu:1 --cpus-per-task=8 --ntasks-per-node=1;c3c435fb-0f2b-4ed8-be5d-200bc5c3306f]633;Csalloc: Pending job allocation 3296435\r\nsalloc: job 3296435 queued and waiting for resources\r\n",,terminal_output
|
9 |
+
8,15978,"TERMINAL",0,0,"^Csalloc: Job allocation 3296435 has been revoked.\r\nsalloc: Job aborted due to signal\r\n]0;tum_ind3695@hkn1993:~/projects/jafar]633;D;1",,terminal_output
|
10 |
+
9,16211,"TERMINAL",0,0,"^C",,terminal_command
|
11 |
+
10,16222,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;c3c435fb-0f2b-4ed8-be5d-200bc5c3306f]633;C]0;tum_ind3695@hkn1993:~/projects/jafar]633;D",,terminal_output
|
12 |
+
11,20351,"TERMINAL",0,0,"salloc --account=hk-project-p0023960 --time=01:00:00 --partition=accelerated --nodes=1 --gres=gpu:4 --cpus-per-task=8 --ntasks-per-node=4",,terminal_command
|
13 |
+
12,20425,"TERMINAL",0,0,"]633;E;2025-06-26 16:11:17 salloc --account=hk-project-p0023960 --time=01:00:00 --partition=accelerated --nodes=1 --gres=gpu:4 --cpus-per-task=8 --ntasks-per-node=4;c3c435fb-0f2b-4ed8-be5d-200bc5c3306f]633;Csalloc: Pending job allocation 3296436\r\nsalloc: job 3296436 queued and waiting for resources\r\n",,terminal_output
|
14 |
+
13,28940,"TERMINAL",0,0,"bash",,terminal_focus
|
15 |
+
14,28960,"TERMINAL",0,0,"salloc",,terminal_focus
|
16 |
+
15,28962,"TERMINAL",0,0,"bash",,terminal_focus
|
17 |
+
16,31131,"TERMINAL",0,0,"queuesldkfj;^C",,terminal_command
|
18 |
+
17,31141,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;7a5b3bb0-cefb-4a99-9c32-203d8b5f46a4]633;C]0;tum_ind3695@hkn1993:~/projects/jafar]633;D",,terminal_output
|
19 |
+
18,33456,"TERMINAL",0,0,"squeue",,terminal_command
|
20 |
+
19,33491,"TERMINAL",0,0,"]633;E;2025-06-26 16:11:30 squeue;7a5b3bb0-cefb-4a99-9c32-203d8b5f46a4]633;C JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON)\r\n 3296436 accelerat interact tum_ind3 PD 0:00 1 (Priority)\r\n]0;tum_ind3695@hkn1993:~/projects/jafar]633;D;0",,terminal_output
|
21 |
+
20,36042,"TERMINAL",0,0,"^C",,terminal_command
|
22 |
+
21,36062,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]633;E;;7a5b3bb0-cefb-4a99-9c32-203d8b5f46a4]633;C]0;tum_ind3695@hkn1993:~/projects/jafar]633;D",,terminal_output
|
23 |
+
22,36495,"TERMINAL",0,0,"salloc",,terminal_focus
|
24 |
+
23,36775,"TERMINAL",0,0,"^Csalloc: Job allocation 3296436 has been revoked.\r\nsalloc: Job aborted due to signal\r\n]0;tum_ind3695@hkn1993:~/projects/jafar]633;D;1",,terminal_output
|
25 |
+
24,43729,"TERMINAL",0,0,"salloc --account=hk-project-p0023960 --time=00:30:00 --partition=accelerated --nodes=1 --gres=gpu:4 --cpus-per-task=8 --ntasks-per-node=4",,terminal_command
|
26 |
+
25,43823,"TERMINAL",0,0,"]633;E;2025-06-26 16:11:40 salloc --account=hk-project-p0023960 --time=00:30:00 --partition=accelerated --nodes=1 --gres=gpu:4 --cpus-per-task=8 --ntasks-per-node=4;c3c435fb-0f2b-4ed8-be5d-200bc5c3306f]633;Csalloc: Pending job allocation 3296443\r\nsalloc: job 3296443 queued and waiting for resources\r\n",,terminal_output
|
27 |
+
26,44347,"TERMINAL",0,0,"bash",,terminal_focus
|
28 |
+
27,64105,"TERMINAL",0,0,"queue",,terminal_command
|
29 |
+
28,64151,"TERMINAL",0,0,"]633;E;2025-06-26 16:12:01 queue;7a5b3bb0-cefb-4a99-9c32-203d8b5f46a4]633;C",,terminal_output
|
30 |
+
29,64280,"TERMINAL",0,0,"[?1049h[22;0;0t[1;25r(B[m[4l[?7h[H[2JEvery 1.0s: squeue --me[1;190Hhkn1993.localdomain: Thu Jun 26 16:12:01 2025[3;14HJOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)[4;12H3296443 accelerat interact tum_ind3 PD\t0:00\t 1 (Priority)[25;234H",,terminal_output
|
31 |
+
30,65335,"TERMINAL",0,0,"[1;229H2[25d\t ",,terminal_output
|
32 |
+
31,66377,"TERMINAL",0,0,"[1;229H3[25d\t ",,terminal_output
|
33 |
+
32,67426,"TERMINAL",0,0,"[1;229H4[25d\t ",,terminal_output
|
34 |
+
33,68475,"TERMINAL",0,0,"[1;229H5[25d\t ",,terminal_output
|
35 |
+
34,69522,"TERMINAL",0,0,"[1;229H6[25d\t ",,terminal_output
|
36 |
+
35,70629,"TERMINAL",0,0,"[1;229H7[25d\t ",,terminal_output
|
37 |
+
36,71554,"TERMINAL",0,0,"[1;229H8[25d\t ",,terminal_output
|
38 |
+
37,72597,"TERMINAL",0,0,"[1;229H9[25d\t ",,terminal_output
|
39 |
+
38,73648,"TERMINAL",0,0,"[1;228H10[25d\t ",,terminal_output
|
40 |
+
39,74700,"TERMINAL",0,0,"[1;229H1[25d\t ",,terminal_output
|
41 |
+
40,75735,"TERMINAL",0,0,"[1;229H2[25d\t ",,terminal_output
|
42 |
+
41,77469,"TERMINAL",0,0,"[1;229H3[25d\t ",,terminal_output
|
43 |
+
42,78447,"TERMINAL",0,0,"[1;229H5[25d\t ",,terminal_output
|
44 |
+
43,79231,"TERMINAL",0,0,"[1;229H6[25d\t ",,terminal_output
|
45 |
+
44,79935,"TERMINAL",0,0,"[1;229H7[25d\t ",,terminal_output
|
46 |
+
45,80981,"TERMINAL",0,0,"[1;229H8[25d\t ",,terminal_output
|
47 |
+
46,82033,"TERMINAL",0,0,"[1;229H9[25d\t ",,terminal_output
|
48 |
+
47,83107,"TERMINAL",0,0,"[1;228H20[25d\t ",,terminal_output
|
49 |
+
48,84122,"TERMINAL",0,0,"[1;229H1[25d\t ",,terminal_output
|
50 |
+
49,85165,"TERMINAL",0,0,"[1;229H2[25d\t ",,terminal_output
|
51 |
+
50,86220,"TERMINAL",0,0,"[1;229H3[25d\t ",,terminal_output
|
52 |
+
51,87260,"TERMINAL",0,0,"[1;229H4[25d\t ",,terminal_output
|
53 |
+
52,88059,"TERMINAL",0,0,"[H[2J[7Ghkn1993.localdomain: Thu Jun 26 16:12:25 2025[3;14HJOBID PARTITION NAME USER ST[4;6HTIME NODES NODELIST(REASON)[5;12H3296443 accelerat interact tum_ind3 PD[6;6H0:00[16G1 (Priority)[25;51H",,terminal_output
|
54 |
+
53,89109,"TERMINAL",0,0,"[1;46H6[25;51H",,terminal_output
|
55 |
+
54,89875,"TERMINAL",0,0,"salloc",,terminal_focus
|
56 |
+
55,89998,"TERMINAL",0,0,"[H[2JEvery 1.0s: squeue --me[1;65Hhkn1993.localdomain: Thu Jun 26 16:12:27 2025[3;14HJOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)[4;12H3296443 accelerat interact tum_ind3 PD\t0:00\t 1 (Priority)[25;109H",,terminal_output
|
57 |
+
56,90380,"TERMINAL",0,0,"[H[2JEvery 1.0s: squeue --me[1;134Hhkn1993.localdomain: Thu Jun 26 16:12:27 2025[3;14HJOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)[4;12H3296443 accelerat interact tum_ind3 PD\t0:00\t 1 (Priority)[25;178H",,terminal_output
|
58 |
+
57,91418,"TERMINAL",0,0,"[1;173H8[25d\t ",,terminal_output
|
59 |
+
58,92453,"TERMINAL",0,0,"[1;173H9[25d\t ",,terminal_output
|
60 |
+
59,93501,"TERMINAL",0,0,"[1;172H30[25d\t ",,terminal_output
|
61 |
+
60,94544,"TERMINAL",0,0,"[1;173H1[25d\t ",,terminal_output
|
62 |
+
61,95583,"TERMINAL",0,0,"[1;173H2[25d\t ",,terminal_output
|
63 |
+
62,96633,"TERMINAL",0,0,"[1;173H3[25d\t ",,terminal_output
|
64 |
+
63,97685,"TERMINAL",0,0,"[1;173H4[25d\t ",,terminal_output
|
65 |
+
64,98724,"TERMINAL",0,0,"[1;173H5[25d\t ",,terminal_output
|
66 |
+
65,100444,"TERMINAL",0,0,"[1;173H6[25d\t ",,terminal_output
|
67 |
+
66,101449,"TERMINAL",0,0,"[1;173H7[25d\t ",,terminal_output
|
68 |
+
67,102450,"TERMINAL",0,0,"[1;173H9[25d\t ",,terminal_output
|
69 |
+
68,103446,"TERMINAL",0,0,"[1;172H40[25d\t ",,terminal_output
|
70 |
+
69,103938,"TERMINAL",0,0,"[1;173H1[25d\t ",,terminal_output
|
71 |
+
70,104866,"TERMINAL",0,0,"watch",,terminal_focus
|
72 |
+
71,104983,"TERMINAL",0,0,"[1;173H2[25d\t ",,terminal_output
|
73 |
+
72,105929,"TERMINAL",0,0,"[25;1H[?1049l[23;0;0t\r[?1l>]0;tum_ind3695@hkn1993:~/projects/jafar]633;D;0",,terminal_output
|
74 |
+
73,107187,"TERMINAL",0,0,"idle",,terminal_command
|
75 |
+
74,107230,"TERMINAL",0,0,"]633;E;2025-06-26 16:12:44 idle;7a5b3bb0-cefb-4a99-9c32-203d8b5f46a4]633;CPartition dev_cpuonly : 12 nodes idle\r\nPartition cpuonly : 4 nodes idle\r\nPartition dev_accelerated : 2 nodes idle\r\nPartition accelerated : 0 nodes idle\r\nPartition dev_accelerated-h100 : 0 nodes idle\r\nPartition accelerated-h100 : 0 nodes idle\r\nPartition large : 7 nodes idle\r\n]0;tum_ind3695@hkn1993:~/projects/jafar]633;D;0",,terminal_output
|
76 |
+
75,164421,"TERMINAL",0,0,"salloc",,terminal_focus
|
77 |
+
76,166699,"TERMINAL",0,0,"bash",,terminal_focus
|
78 |
+
77,168379,"TERMINAL",0,0,"queue",,terminal_command
|
79 |
+
78,168430,"TERMINAL",0,0,"]633;E;2025-06-26 16:13:45 queue;7a5b3bb0-cefb-4a99-9c32-203d8b5f46a4]633;C",,terminal_output
|
80 |
+
79,168489,"TERMINAL",0,0,"[?1049h[22;0;0t[1;25r(B[m[4l[?7h[H[2JEvery 1.0s: squeue --me[1;134Hhkn1993.localdomain: Thu Jun 26 16:13:45 2025[3;14HJOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)[4;12H3296443 accelerat interact tum_ind3 PD\t0:00\t 1 (Priority)[25;178H",,terminal_output
|
81 |
+
80,169537,"TERMINAL",0,0,"[1;173H6[25d\t ",,terminal_output
|
82 |
+
81,170586,"TERMINAL",0,0,"[1;173H7[25d\t ",,terminal_output
|
83 |
+
82,171632,"TERMINAL",0,0,"[1;173H8[25d\t ",,terminal_output
|
84 |
+
83,172677,"TERMINAL",0,0,"[1;173H9[25d\t ",,terminal_output
|
85 |
+
84,173724,"TERMINAL",0,0,"[1;172H50[25d\t ",,terminal_output
|
86 |
+
85,174771,"TERMINAL",0,0,"[1;173H1[25d\t ",,terminal_output
|
87 |
+
86,175802,"TERMINAL",0,0,"[1;173H3[25d\t ",,terminal_output
|
88 |
+
87,176851,"TERMINAL",0,0,"[1;173H4[25d\t ",,terminal_output
|
89 |
+
88,177908,"TERMINAL",0,0,"[1;173H5[25d\t ",,terminal_output
|
90 |
+
89,178952,"TERMINAL",0,0,"[1;173H6[25d\t ",,terminal_output
|
91 |
+
90,179996,"TERMINAL",0,0,"[1;173H7[25d\t ",,terminal_output
|
92 |
+
91,181042,"TERMINAL",0,0,"[1;173H8[25d\t ",,terminal_output
|
93 |
+
92,182093,"TERMINAL",0,0,"[1;173H9[25d\t ",,terminal_output
|
94 |
+
93,183139,"TERMINAL",0,0,"[1;170H4:00[25d\t ",,terminal_output
|
95 |
+
94,184189,"TERMINAL",0,0,"[1;173H1[25d\t ",,terminal_output
|
96 |
+
95,185242,"TERMINAL",0,0,"[1;173H2[25d\t ",,terminal_output
|
97 |
+
96,186291,"TERMINAL",0,0,"[1;173H3[25d\t ",,terminal_output
|
98 |
+
97,187345,"TERMINAL",0,0,"[1;173H4[25d\t ",,terminal_output
|
99 |
+
98,188391,"TERMINAL",0,0,"[1;173H5[25d\t ",,terminal_output
|
100 |
+
99,189435,"TERMINAL",0,0,"[1;173H6[25d\t ",,terminal_output
|
101 |
+
100,190479,"TERMINAL",0,0,"[1;173H7[25d\t ",,terminal_output
|
102 |
+
101,191532,"TERMINAL",0,0,"[1;173H8[25d\t ",,terminal_output
|
103 |
+
102,192589,"TERMINAL",0,0,"[1;173H9[25d\t ",,terminal_output
|
104 |
+
103,193645,"TERMINAL",0,0,"[1;172H10[25d\t ",,terminal_output
|
105 |
+
104,194700,"TERMINAL",0,0,"[1;173H1[25d\t ",,terminal_output
|
106 |
+
105,195739,"TERMINAL",0,0,"[1;173H2[25d\t ",,terminal_output
|
107 |
+
106,196777,"TERMINAL",0,0,"[1;173H3[25d\t ",,terminal_output
|
108 |
+
107,197824,"TERMINAL",0,0,"[1;173H5[25d\t ",,terminal_output
|
109 |
+
108,198865,"TERMINAL",0,0,"[1;173H6[25d\t ",,terminal_output
|
110 |
+
109,199921,"TERMINAL",0,0,"[1;173H7[25d\t ",,terminal_output
|
111 |
+
110,200966,"TERMINAL",0,0,"[1;173H8[25d\t ",,terminal_output
|
112 |
+
111,202076,"TERMINAL",0,0,"[1;173H9[25d\t ",,terminal_output
|
113 |
+
112,203069,"TERMINAL",0,0,"[1;172H20[25d\t ",,terminal_output
|
114 |
+
113,204130,"TERMINAL",0,0,"[1;173H1[25d\t ",,terminal_output
|
115 |
+
114,205190,"TERMINAL",0,0,"[1;173H2[25d\t ",,terminal_output
|
116 |
+
115,206243,"TERMINAL",0,0,"[1;173H3[25d\t ",,terminal_output
|
117 |
+
116,207313,"TERMINAL",0,0,"[1;173H4[25d\t ",,terminal_output
|
118 |
+
117,208347,"TERMINAL",0,0,"[1;173H5[25d\t ",,terminal_output
|
119 |
+
118,209415,"TERMINAL",0,0,"[1;173H6[25d\t ",,terminal_output
|
120 |
+
119,210460,"TERMINAL",0,0,"[1;173H7[25d\t ",,terminal_output
|
121 |
+
120,211515,"TERMINAL",0,0,"[1;173H8[25d\t ",,terminal_output
|
122 |
+
121,212561,"TERMINAL",0,0,"[1;173H9[25d\t ",,terminal_output
|
123 |
+
122,213615,"TERMINAL",0,0,"[1;172H30[25d\t ",,terminal_output
|
124 |
+
123,214662,"TERMINAL",0,0,"[1;173H1[25d\t ",,terminal_output
|
125 |
+
124,215706,"TERMINAL",0,0,"[1;173H2[25d\t ",,terminal_output
|
126 |
+
125,216778,"TERMINAL",0,0,"[1;173H3[25d\t ",,terminal_output
|
127 |
+
126,217830,"TERMINAL",0,0,"[1;173H5[25d\t ",,terminal_output
|
128 |
+
127,218878,"TERMINAL",0,0,"[1;173H6[25d\t ",,terminal_output
|
129 |
+
128,219924,"TERMINAL",0,0,"[1;173H7[25d\t ",,terminal_output
|
130 |
+
129,220966,"TERMINAL",0,0,"[1;173H8[25d\t ",,terminal_output
|
131 |
+
130,222014,"TERMINAL",0,0,"[1;173H9[25d\t ",,terminal_output
|
132 |
+
131,223051,"TERMINAL",0,0,"[1;172H40[25d\t ",,terminal_output
|
133 |
+
132,224093,"TERMINAL",0,0,"[1;173H1[25d\t ",,terminal_output
|
134 |
+
133,225454,"TERMINAL",0,0,"[1;173H2[25d\t ",,terminal_output
|
135 |
+
134,226445,"TERMINAL",0,0,"[1;173H3[25d\t ",,terminal_output
|
136 |
+
135,227448,"TERMINAL",0,0,"[1;173H4[25d\t ",,terminal_output
|
137 |
+
136,229454,"TERMINAL",0,0,"[1;173H5[25d\t ",,terminal_output
|
138 |
+
137,229456,"TERMINAL",0,0,"[1;173H6[25d\t ",,terminal_output
|
139 |
+
138,230450,"TERMINAL",0,0,"[1;173H7[25d\t ",,terminal_output
|
140 |
+
139,231445,"TERMINAL",0,0,"[1;173H8[25d\t ",,terminal_output
|
141 |
+
140,232628,"TERMINAL",0,0,"[1;173H9[25d\t ",,terminal_output
|
142 |
+
141,233524,"TERMINAL",0,0,"[1;172H50[25d\t ",,terminal_output
|
143 |
+
142,234569,"TERMINAL",0,0,"[1;173H1[25d\t ",,terminal_output
|
144 |
+
143,235611,"TERMINAL",0,0,"[1;173H2[25d\t ",,terminal_output
|
145 |
+
144,236652,"TERMINAL",0,0,"[1;173H3[25d\t ",,terminal_output
|
146 |
+
145,237711,"TERMINAL",0,0,"[1;173H4[25d\t ",,terminal_output
|
147 |
+
146,238755,"TERMINAL",0,0,"[1;173H5[25d\t ",,terminal_output
|
148 |
+
147,239805,"TERMINAL",0,0,"[1;173H6[25d\t ",,terminal_output
|
149 |
+
148,240854,"TERMINAL",0,0,"[1;173H8[25d\t ",,terminal_output
|
150 |
+
149,241909,"TERMINAL",0,0,"[1;173H9[25d\t ",,terminal_output
|
151 |
+
150,243070,"TERMINAL",0,0,"[1;170H5:00[25d\t ",,terminal_output
|
152 |
+
151,244104,"TERMINAL",0,0,"[1;173H1[25d\t ",,terminal_output
|
153 |
+
152,245150,"TERMINAL",0,0,"[1;173H2[25d\t ",,terminal_output
|
154 |
+
153,246198,"TERMINAL",0,0,"[1;173H3[25d\t ",,terminal_output
|
155 |
+
154,247242,"TERMINAL",0,0,"[1;173H4[25d\t ",,terminal_output
|
156 |
+
155,248309,"TERMINAL",0,0,"[1;173H5[25d\t ",,terminal_output
|
157 |
+
156,249342,"TERMINAL",0,0,"[1;173H6[25d\t ",,terminal_output
|
158 |
+
157,250392,"TERMINAL",0,0,"[1;173H7[25d\t ",,terminal_output
|
159 |
+
158,251436,"TERMINAL",0,0,"[1;173H8[25d\t ",,terminal_output
|
160 |
+
159,252481,"TERMINAL",0,0,"[1;173H9[25d\t ",,terminal_output
|
161 |
+
160,253522,"TERMINAL",0,0,"[1;172H10[25d\t ",,terminal_output
|
162 |
+
161,254582,"TERMINAL",0,0,"[1;173H1[25d\t ",,terminal_output
|
163 |
+
162,255614,"TERMINAL",0,0,"[1;173H2[25d\t ",,terminal_output
|
164 |
+
163,256661,"TERMINAL",0,0,"[1;173H3[25d\t ",,terminal_output
|
165 |
+
164,257718,"TERMINAL",0,0,"[1;173H4[25d\t ",,terminal_output
|
166 |
+
165,258768,"TERMINAL",0,0,"[1;173H5[25d\t ",,terminal_output
|
167 |
+
166,259815,"TERMINAL",0,0,"[1;173H6[25d\t ",,terminal_output
|
168 |
+
167,260866,"TERMINAL",0,0,"[1;173H8[25d\t ",,terminal_output
|
169 |
+
168,261899,"TERMINAL",0,0,"[1;173H9[25d\t ",,terminal_output
|
170 |
+
169,262948,"TERMINAL",0,0,"[1;172H20[25d\t ",,terminal_output
|
171 |
+
170,263993,"TERMINAL",0,0,"[1;173H1[25d\t ",,terminal_output
|
172 |
+
171,265037,"TERMINAL",0,0,"[1;173H2[25d\t ",,terminal_output
|
173 |
+
172,266090,"TERMINAL",0,0,"[1;173H3[25d\t ",,terminal_output
|
174 |
+
173,267129,"TERMINAL",0,0,"[1;173H4[25d\t ",,terminal_output
|
175 |
+
174,268178,"TERMINAL",0,0,"[1;173H5[25d\t ",,terminal_output
|
176 |
+
175,269223,"TERMINAL",0,0,"[1;173H6[25d\t ",,terminal_output
|
177 |
+
176,270264,"TERMINAL",0,0,"[1;173H7[25d\t ",,terminal_output
|
178 |
+
177,271318,"TERMINAL",0,0,"[1;173H8[25d\t ",,terminal_output
|
179 |
+
178,272354,"TERMINAL",0,0,"[1;173H9[25d\t ",,terminal_output
|
180 |
+
179,273392,"TERMINAL",0,0,"[1;172H30[25d\t ",,terminal_output
|
181 |
+
180,274436,"TERMINAL",0,0,"[1;173H1[25d\t ",,terminal_output
|
182 |
+
181,275490,"TERMINAL",0,0,"[1;173H2[25d\t ",,terminal_output
|
183 |
+
182,276534,"TERMINAL",0,0,"[1;173H3[25d\t ",,terminal_output
|
184 |
+
183,277587,"TERMINAL",0,0,"[1;173H4[25d\t ",,terminal_output
|
185 |
+
184,278642,"TERMINAL",0,0,"[1;173H5[25d\t ",,terminal_output
|
186 |
+
185,279690,"TERMINAL",0,0,"[1;173H6[25d\t ",,terminal_output
|
187 |
+
186,280740,"TERMINAL",0,0,"[1;173H7[25d\t ",,terminal_output
|
188 |
+
187,281787,"TERMINAL",0,0,"[1;173H8[25d\t ",,terminal_output
|
189 |
+
188,282825,"TERMINAL",0,0,"[1;172H40[25d\t ",,terminal_output
|
190 |
+
189,283876,"TERMINAL",0,0,"[1;173H1[25d\t ",,terminal_output
|
191 |
+
190,284915,"TERMINAL",0,0,"[1;173H2[25d\t ",,terminal_output
|
192 |
+
191,285963,"TERMINAL",0,0,"[1;173H3[25d\t ",,terminal_output
|
193 |
+
192,287006,"TERMINAL",0,0,"[1;173H4[25d\t ",,terminal_output
|
194 |
+
193,288060,"TERMINAL",0,0,"[1;173H5[25d\t ",,terminal_output
|
195 |
+
194,289110,"TERMINAL",0,0,"[1;173H6[25d\t ",,terminal_output
|
196 |
+
195,290162,"TERMINAL",0,0,"[1;173H7[25d\t ",,terminal_output
|
197 |
+
196,291251,"TERMINAL",0,0,"[1;173H8[25d\t ",,terminal_output
|
198 |
+
197,292259,"TERMINAL",0,0,"[1;173H9[25d\t ",,terminal_output
|
199 |
+
198,293310,"TERMINAL",0,0,"[1;172H50[25d\t ",,terminal_output
|
200 |
+
199,294367,"TERMINAL",0,0,"[1;173H1[25d\t ",,terminal_output
|
201 |
+
200,295416,"TERMINAL",0,0,"[1;173H2[25d\t ",,terminal_output
|
202 |
+
201,296475,"TERMINAL",0,0,"[1;173H3[25d\t ",,terminal_output
|
203 |
+
202,297520,"TERMINAL",0,0,"[1;173H4[25d\t ",,terminal_output
|
204 |
+
203,298577,"TERMINAL",0,0,"[1;173H5[25d\t ",,terminal_output
|
205 |
+
204,299611,"TERMINAL",0,0,"[1;173H6[25d\t ",,terminal_output
|
206 |
+
205,300659,"TERMINAL",0,0,"[1;173H7[25d\t ",,terminal_output
|
207 |
+
206,301714,"TERMINAL",0,0,"[1;173H8[25d\t ",,terminal_output
|
208 |
+
207,302767,"TERMINAL",0,0,"[1;173H9[25d\t ",,terminal_output
|
209 |
+
208,304048,"TERMINAL",0,0,"[1;170H6:00[25d\t ",,terminal_output
|
210 |
+
209,305092,"TERMINAL",0,0,"[1;173H2[25d\t ",,terminal_output
|
211 |
+
210,306131,"TERMINAL",0,0,"[1;173H3[25d\t ",,terminal_output
|
212 |
+
211,307176,"TERMINAL",0,0,"[1;173H4[25d\t ",,terminal_output
|
213 |
+
212,308230,"TERMINAL",0,0,"[1;173H5[25d\t ",,terminal_output
|
214 |
+
213,309268,"TERMINAL",0,0,"[1;173H6[25d\t ",,terminal_output
|
215 |
+
214,310317,"TERMINAL",0,0,"[1;173H7[25d\t ",,terminal_output
|
216 |
+
215,311374,"TERMINAL",0,0,"[1;173H8[25d\t ",,terminal_output
|
217 |
+
216,312429,"TERMINAL",0,0,"[1;173H9[25d\t ",,terminal_output
|
218 |
+
217,313470,"TERMINAL",0,0,"[1;172H10[25d\t ",,terminal_output
|
219 |
+
218,314513,"TERMINAL",0,0,"[1;173H1[25d\t ",,terminal_output
|
220 |
+
219,315558,"TERMINAL",0,0,"[1;173H2[25d\t ",,terminal_output
|
221 |
+
220,316607,"TERMINAL",0,0,"[1;173H3[25d\t ",,terminal_output
|
222 |
+
221,317646,"TERMINAL",0,0,"[1;173H4[25d\t ",,terminal_output
|
223 |
+
222,318686,"TERMINAL",0,0,"[1;173H5[25d\t ",,terminal_output
|
224 |
+
223,319738,"TERMINAL",0,0,"[1;173H6[25d\t ",,terminal_output
|
225 |
+
224,320789,"TERMINAL",0,0,"[1;173H7[25d\t ",,terminal_output
|
226 |
+
225,321831,"TERMINAL",0,0,"[1;173H9[25d\t ",,terminal_output
|
227 |
+
226,322876,"TERMINAL",0,0,"[1;172H20[25d\t ",,terminal_output
|
228 |
+
227,324143,"TERMINAL",0,0,"[1;173H1[25d\t ",,terminal_output
|
229 |
+
228,324956,"TERMINAL",0,0,"[1;173H2[25d\t ",,terminal_output
|
230 |
+
229,326005,"TERMINAL",0,0,"[1;173H3[25d\t ",,terminal_output
|
231 |
+
230,327055,"TERMINAL",0,0,"[1;173H4[25d\t ",,terminal_output
|
232 |
+
231,328123,"TERMINAL",0,0,"[1;173H5[25d\t ",,terminal_output
|
233 |
+
232,329177,"TERMINAL",0,0,"[1;173H6[25d\t ",,terminal_output
|
234 |
+
233,330397,"TERMINAL",0,0,"[1;173H7[25d\t ",,terminal_output
|
235 |
+
234,331308,"TERMINAL",0,0,"[1;173H8[25d\t ",,terminal_output
|
236 |
+
235,332347,"TERMINAL",0,0,"[1;173H9[25d\t ",,terminal_output
|
237 |
+
236,333389,"TERMINAL",0,0,"[1;172H30[25d\t ",,terminal_output
|
238 |
+
237,334446,"TERMINAL",0,0,"[1;173H1[25d\t ",,terminal_output
|
239 |
+
238,337470,"TERMINAL",0,0,"[1;173H2[25d\t [1;173H3[25d\t ",,terminal_output
|
240 |
+
239,338477,"TERMINAL",0,0,"[1;173H4[25d\t ",,terminal_output
|
241 |
+
240,339465,"TERMINAL",0,0,"[1;173H5[25d\t ",,terminal_output
|
242 |
+
241,340466,"TERMINAL",0,0,"[1;173H6[25d\t ",,terminal_output
|
243 |
+
242,341466,"TERMINAL",0,0,"[1;173H7[25d\t ",,terminal_output
|
244 |
+
243,342467,"TERMINAL",0,0,"[1;173H8[25d\t ",,terminal_output
|
245 |
+
244,343462,"TERMINAL",0,0,"[1;173H9[25d\t ",,terminal_output
|
246 |
+
245,344472,"TERMINAL",0,0,"[1;172H41[25d\t ",,terminal_output
|
247 |
+
246,345483,"TERMINAL",0,0,"[1;173H2[25d\t ",,terminal_output
|
248 |
+
247,346465,"TERMINAL",0,0,"[1;173H3[25d\t ",,terminal_output
|
249 |
+
248,347467,"TERMINAL",0,0,"[1;173H4[25d\t ",,terminal_output
|
250 |
+
249,349473,"TERMINAL",0,0,"[1;173H5[25d\t ",,terminal_output
|
251 |
+
250,349474,"TERMINAL",0,0,"[1;173H6[25d\t ",,terminal_output
|
252 |
+
251,350468,"TERMINAL",0,0,"[1;173H7[25d\t ",,terminal_output
|
253 |
+
252,351474,"TERMINAL",0,0,"[1;173H8[25d\t ",,terminal_output
|
254 |
+
253,352476,"TERMINAL",0,0,"[1;173H9[25d\t ",,terminal_output
|
255 |
+
254,353471,"TERMINAL",0,0,"[1;172H50[25d\t ",,terminal_output
|
256 |
+
255,354469,"TERMINAL",0,0,"[1;173H1[25d\t ",,terminal_output
|
257 |
+
256,355472,"TERMINAL",0,0,"[1;173H2[25d\t ",,terminal_output
|
258 |
+
257,356472,"TERMINAL",0,0,"[1;173H3[25d\t ",,terminal_output
|
259 |
+
258,357475,"TERMINAL",0,0,"[1;173H4[25d\t ",,terminal_output
|
260 |
+
259,359471,"TERMINAL",0,0,"[1;173H5[25d\t ",,terminal_output
|
261 |
+
260,360467,"TERMINAL",0,0,"[1;173H6[25d\t ",,terminal_output
|
262 |
+
261,361512,"TERMINAL",0,0,"[1;173H7[25d\t ",,terminal_output
|
263 |
+
262,362469,"TERMINAL",0,0,"[1;173H8[25d\t ",,terminal_output
|
264 |
+
263,363474,"TERMINAL",0,0,"[1;173H9[25d\t ",,terminal_output
|
265 |
+
264,364471,"TERMINAL",0,0,"[1;170H7:00[25d\t ",,terminal_output
|
266 |
+
265,365472,"TERMINAL",0,0,"[1;173H1[25d\t ",,terminal_output
|
267 |
+
266,366466,"TERMINAL",0,0,"[1;173H2[25d\t ",,terminal_output
|
268 |
+
267,367471,"TERMINAL",0,0,"[1;173H4[25d\t ",,terminal_output
|
269 |
+
268,368470,"TERMINAL",0,0,"[1;173H5[25d\t ",,terminal_output
|
270 |
+
269,369469,"TERMINAL",0,0,"[1;173H6[25d\t ",,terminal_output
|
271 |
+
270,370471,"TERMINAL",0,0,"[1;173H7[25d\t ",,terminal_output
|
272 |
+
271,371050,"TERMINAL",0,0,"[1;173H8[25d\t ",,terminal_output
|
273 |
+
272,372097,"TERMINAL",0,0,"[1;173H9[25d\t ",,terminal_output
|
274 |
+
273,373137,"TERMINAL",0,0,"[1;172H10[25d\t ",,terminal_output
|
275 |
+
274,374285,"TERMINAL",0,0,"[1;173H1[25d\t ",,terminal_output
|
276 |
+
275,375229,"TERMINAL",0,0,"[1;173H2[25d\t ",,terminal_output
|
277 |
+
276,376283,"TERMINAL",0,0,"[1;173H3[25d\t ",,terminal_output
|
278 |
+
277,377362,"TERMINAL",0,0,"[1;173H4[25d\t ",,terminal_output
|
279 |
+
278,378384,"TERMINAL",0,0,"[1;173H5[25d\t ",,terminal_output
|
280 |
+
279,379435,"TERMINAL",0,0,"[1;173H6[25d\t ",,terminal_output
|
281 |
+
280,380488,"TERMINAL",0,0,"[1;173H7[25d\t ",,terminal_output
|
282 |
+
281,381527,"TERMINAL",0,0,"[1;173H8[25d\t ",,terminal_output
|
283 |
+
282,382577,"TERMINAL",0,0,"[1;173H9[25d\t ",,terminal_output
|
284 |
+
283,383616,"TERMINAL",0,0,"[1;172H20[25d\t ",,terminal_output
|
285 |
+
284,385467,"TERMINAL",0,0,"[1;173H1[25d\t ",,terminal_output
|
286 |
+
285,386467,"TERMINAL",0,0,"[1;173H2[25d\t ",,terminal_output
|
287 |
+
286,387463,"TERMINAL",0,0,"[1;173H3[25d\t ",,terminal_output
|
288 |
+
287,388467,"TERMINAL",0,0,"[1;173H4[25d\t ",,terminal_output
|
289 |
+
288,389464,"TERMINAL",0,0,"[1;173H6[25d\t ",,terminal_output
|
290 |
+
289,390465,"TERMINAL",0,0,"[1;173H7[25d\t ",,terminal_output
|
291 |
+
290,391467,"TERMINAL",0,0,"[1;173H8[25d\t ",,terminal_output
|
292 |
+
291,392474,"TERMINAL",0,0,"[1;173H9[25d\t ",,terminal_output
|
293 |
+
292,393464,"TERMINAL",0,0,"[1;172H30[25d\t ",,terminal_output
|
294 |
+
293,394469,"TERMINAL",0,0,"[1;173H1[25d\t ",,terminal_output
|
295 |
+
294,395474,"TERMINAL",0,0,"[1;173H2[25d\t ",,terminal_output
|
296 |
+
295,396469,"TERMINAL",0,0,"[1;173H3[25d\t ",,terminal_output
|
297 |
+
296,397475,"TERMINAL",0,0,"[1;173H4[25d\t ",,terminal_output
|
298 |
+
297,398473,"TERMINAL",0,0,"[1;173H5[25d\t ",,terminal_output
|
299 |
+
298,399481,"TERMINAL",0,0,"[1;173H6[25d\t ",,terminal_output
|
300 |
+
299,400467,"TERMINAL",0,0,"[1;173H7[25d\t ",,terminal_output
|
301 |
+
300,401466,"TERMINAL",0,0,"[1;173H8[25d\t ",,terminal_output
|
302 |
+
301,402467,"TERMINAL",0,0,"[1;173H9[25d\t ",,terminal_output
|
303 |
+
302,404469,"TERMINAL",0,0,"[1;172H40[25d\t ",,terminal_output
|
304 |
+
303,405475,"TERMINAL",0,0,"[1;173H1[25d\t ",,terminal_output
|
305 |
+
304,406472,"TERMINAL",0,0,"[1;173H2[25d\t ",,terminal_output
|
306 |
+
305,407465,"TERMINAL",0,0,"[1;173H3[25d\t ",,terminal_output
|
307 |
+
306,409474,"TERMINAL",0,0,"[1;173H4[25d\t ",,terminal_output
|
308 |
+
307,409478,"TERMINAL",0,0,"[1;173H5[25d\t ",,terminal_output
|
309 |
+
308,410470,"TERMINAL",0,0,"[1;173H6[25d\t ",,terminal_output
|
310 |
+
309,411472,"TERMINAL",0,0,"[1;173H8[25d\t ",,terminal_output
|
311 |
+
310,412473,"TERMINAL",0,0,"[1;173H9[25d\t ",,terminal_output
|
312 |
+
311,413463,"TERMINAL",0,0,"[1;172H50[25d\t ",,terminal_output
|
313 |
+
312,414465,"TERMINAL",0,0,"[1;173H1[25d\t ",,terminal_output
|
314 |
+
313,415468,"TERMINAL",0,0,"[1;173H2[25d\t ",,terminal_output
|
315 |
+
314,416468,"TERMINAL",0,0,"[1;173H3[25d\t ",,terminal_output
|
316 |
+
315,417463,"TERMINAL",0,0,"[1;173H4[25d\t ",,terminal_output
|
317 |
+
316,418463,"TERMINAL",0,0,"[1;173H5[25d\t ",,terminal_output
|
318 |
+
317,419466,"TERMINAL",0,0,"[1;173H6[25d\t ",,terminal_output
|
319 |
+
318,420472,"TERMINAL",0,0,"[1;173H7[25d\t ",,terminal_output
|
320 |
+
319,421476,"TERMINAL",0,0,"[1;173H8[25d\t ",,terminal_output
|
05d9d5da933137c5402a176a469b618685c7e9142aa8972616ca5cdf0f6e53d1/crowd-code-9ff54a43-2a59-41a8-96bc-f7e46d5244651750887279734-2025_06_25-23.36.32.560/source.csv
ADDED
@@ -0,0 +1,3 @@
1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
2 |
+
1,18,"scripts/file_duplicate_checker.py",0,0,"import os\nfrom collections import defaultdict\nfrom tqdm import tqdm\n\ndef find_duplicate_filenames(root_dir):\n filenames = defaultdict(list)\n file_count = 0\n\n # Use tqdm with manual update and no percentage/ETA bar\n pbar = tqdm(desc=""Files scanned"", unit=""file"", dynamic_ncols=True, bar_format=""{desc}: {n_fmt}"")\n\n # Walk the directory recursively\n for dirpath, _, files in os.walk(root_dir):\n for file in files:\n full_path = os.path.join(dirpath, file)\n if os.path.isfile(full_path):\n filenames[file].append(full_path)\n file_count += 1\n pbar.update(1)\n\n pbar.close()\n\n # Print duplicates\n duplicates = {name: paths for name, paths in filenames.items() if len(paths) > 1}\n if duplicates:\n print(""\nDuplicate filenames found:\n"")\n for name, paths in duplicates.items():\n print(f""Filename: {name}"")\n for path in paths:\n print(f"" - {path}"")\n print()\n else:\n print(""\nNo duplicate filenames found."")\n\nif __name__ == ""__main__"":\n import sys\n if len(sys.argv) < 2:\n print(""Usage: python find_duplicates.py <directory_path>"")\n else:\n find_duplicate_filenames(sys.argv[1])\n\n",python,tab
|
3 |
+
2,521,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"11:36:32 PM [info] Activating crowd-code\n11:36:32 PM [info] Welcome back tum_ind3695. Your user-id is '507ab0ec0dfe0c18ad7778dd15e072f92367194c94623114de802c8ed9c52e20'. Happy coding!\n11:36:32 PM [info] Recording started\n",Log,tab
|
05d9d5da933137c5402a176a469b618685c7e9142aa8972616ca5cdf0f6e53d1/crowd-code-aa8be5f9-c447-4faf-b9c6-7142909b3c591750719092446-2025_06_24-00.51.37.15/source.csv
ADDED
@@ -0,0 +1,6 @@
1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
2 |
+
1,12,"todos.md",0,0,"# Todo's\n\n## Misc:\n- [ ] Steuererklärung \n- [ ] jan wlan geld internet und paket\n- [ ] thanh geld 22 club\n- [ ] Arzt termine\n - [ ] normal\n - [ ] zahn\n- [ ] Proteinshake\n- [ ] Protein shaker\n- [ ] Zahnbürste\n- [ ] laptop?? ask stefan what to do with it\n\n\n## 23.06 Monday\n\n- [x] PR step duration\n- [x] generate samples from dyn\n- [x] single batch training overfit\n - [x] lam\n - [x] tokenizer\n - [ ] dynamics model\n\n- [ ] lr finding \n- [ ] pr: update sampling method for resolution \n\n- [ ] retrain lam until convergence?\n- [ ] see if data parallel is better than single gpu?\n\n- [ ] issue: make sampling faster\n- [ ] blog post crowd source\n\n- [ ] Preprocess entire minecraft dataset\n- [ ] Dont let tf see gpus\n- [ ] Look into dataloader\n\nQuestions:\n- optimal lr\n- optimal batch size\n- how to scale lr with batch size\n- how to test/evaluate lam performance?\n - how good are the actions for the lam\n - how many actions do we have?\n - how many actions do we need\n\n\n## 22.06 Sunday\nNotes:\n- How should tokenizer training behave?\n- How should lam training behave?\n- How should dynamics model training behave?\n- \n\nTODOS:\n- [x] look at prs jafar\n- [x] extension version update\n- [x] make big run directory\n- [x] look at the run of lam\n- [ ] train the dynamics model on the new models\n\n- [ ] start a coin run baseline\n - [ ] tokenizer\n - [ ] lam \n - [ ] dynamics model\n\n- [ ] move tfrecord to huggingface\n\n- [x] tierlist for thanh\n- [ ] move tfrecord to helmholtz\n- [ ] helmholtz setup\n- [ ] fix dataloader seeding\n\n- [ ] FFM homework\n- [ ] Tennis\n- [ ] gym\n\n\n\n## 20.06 Friday\n- [x] wäsche\n- [x] flaschen wegbringen\n- [x] zhs \n- [x] cursor setup\n- [x] run overfiting runs on lam\n\n## 19.06 Thursday\n- [x] extension ace palenight\n\n\n- [x] run overfiting runs on dynamics model (got aborted bc of branch switching)\n - [x] test multi gpu overfit on full tfrecord\n\n\n## 18.06 Wednesday\nJobs:\n- [x] run overfiting runs for tokenizer \n - [x] test multi gpu overfit on single batch\n - [x] test multi gpu overfit on single sample\n - [x] test multi gpu overfit on 10 tfrecord\n - [x] test multi gpu overfit on full tfrecord\n\n- [x] create tf record batch (1,4,8,16) sample from dataloader saven \n\n\n## 17.06 Tuesday\n- [x] cleanup Home\n- [x] cleanup ws shared\n\n## 13.06 Friday\n- [ ] Start job for single batch training (overfit dataset)\n- [ ] Make slides for presentation \n\n- [ ] helmholtz setup\n- [ ] move one tf record to helmholts\n\n## 12.06 Thursday\n- [x] Fix oom issue\n - Dataloader was caching\n- [x] find out biggest batch size for now\n- [x] start a run on one node with coinrun for 12h sbatch\n- [x] start a run on one node with minecraft \n- [x] cleanup ws\n- [x] cleanup wandb\n\n\n\nQuestions Dataloader:\n- [ ] What is one 'element' in the dataloader (Is it an episode or mutlitple episodes? Is it clips from an episode?)\n- [ ] What is the number of elements in the dataloader?\n- [ ] Why is shuffling so slow? Why does it have to shuffle the data and not the indices?\n- [ ] Does the dataloader currently shuffle per episode or per clip?\n- [ ] How do we get the best shuffle? (Optimally uniformly over all clips?)\n- [ ] Do we have to use chaching? Does it improve performance? If yes how much? Is it worth it?\n\nQuestions Training:\n- [ ] What is the best batch size? (What is the best way to get the optimal batch size? Just running it?? 
Can we calculate/estimate it?)\n- [ ] Can we just use the Genie hyp params or does our data paralelism change the optimal params? (Is the setup good enough?)\n\n\n## 11.06 Wednesday\n- [ ] Start coinrun training\n- [ ] start minecraft training\n\n## 10.06 Tuesday\n- [ ] First run with new pipeline\n- [ ] Zahnarzt\n- [ ] Flaschen wegbringen\n- [ ] \n- [ ] Blutbild und Schulter termin\n\n## 09.06 Monday\n- [x] Zahnarzt termin machen \n- [ ] Presenation Vorbereitung\n\nOffice:\n - [x] jan ikea\n - [x] ben geld sae trip\n - [x] namitha geld\n\n\nHome:\n - [ ] pa \n - [ ] jan wlan \n - [ ] Barmer geld impfung\n - [ ] physio geld\n\n\n## 08.06 Sunday\n- [ ] zimmer aufräumen\n - [ ] klamotten sofa\n - [ ] müll\n- [ ] küche \n - [ ] boden\n - [ ] müll\n- [ ] \n\n- [ ] video pipeline\n - [ ] clip the npy videos in 16 sec chunks with the designated ranges\n - [ ] 16sec video to npy?\n\n- [ ] readup data pipeline\n - [ ] read vpt \n - [ ] mineworld \n - [ ] genieN\n - [ ] 5k openai\n\n## 07.06 Saturday\n- [x] einkaufen \n - [x] soap \n - [x] öl \n\n## 06.06 Friday\n- [x] log idling gpus every 30mins\n- [x] make website for it on lxhalle\n\n- [ ] video pipeline \n - [x] split into 16 sec videos for datalabeling (training labeler)\n - [ ] verify results\n - [x] some videos are not 16sec\n \n- [x] move videos to new shared workspace\n\nNotes:\n- ffmpeg is bad at keeping length of videos if not re-encoding\n- takes the next best key frame if not re-encoding \n- encoding is super slow (20min video in 40 min instead of 11sec)\n\n## 05.06 Thursday\n- [x] write email to stefan for cluster group \n - [x] mihir \n - [x] me \n- [x] video pipeline\n - [x] convert mp4 to npy\n - [x] verify results\n\n\n## 03.06 Tuesday\n- [x] kit report \n- [x] karte sap\n\n\n## 30.05 Friday\n- [x] Empty bottles\n- [x] Groceries\n\n- [ ] random dataset for trainign\n- [ ] Macbook?\n- [ ] SAP card\n- [ ] Access to cluster\n- [ ] report for KIT\n\n\n## 28.05 Wednesday\n- [x] Data paralelism \n- [x] Sampling with mihir\n\n## 27.05 Tuesday\n- [ ] agents on envs\n- [ ] video dataset\n- [ ] Crafter/Cratax\n\n- [ ] other envs\n - [ ] procgen\n - [ ] gymretro\n - [ ] minecraft\n\n\n\n## 26.05 Monday\n- [x] data sharing\n- [x] 16 procgen envs\n - [x] 10mio frames pro env\n\nTraining times:\n1 node 1 env 10 tage \n1.5b param 5b frames 50 atari env 72k gpuh \n64gpus need 12 days\n\n\n\n## 24.05 Saturday\n- [x] email stefan compute access\n- [ ] data gen for the old envs\n\n- [ ] images from gymnax\n- [ ] craftax images\n\n- [ ] 1 page report for KIT\n- [ ] make BA presentation plan\n\n## 23.05 Friday\n- [x] Kit setup\n\n\n\n## 22.05 Thursday\n- [x] BBH\n - [x] Lemon grass\n - [x] koriander\n - [x] topf -> tam\n - [x] gym?\n- [x] Pr for data generation\n- [x] setup and run gymnax\n\n\n## 21.05 Wednesday\n\n- [ ] datageneration\n - [x] what do other ppl use? 
(jafar, openai, craftax)\n - [x] what is the format (compatibility)\n - [x] find good environments (easy ones for a good baseline)\n - [x] implement data generation script (so it jafar compatible)\n - [x] paramteer for env agents in procgen\n- [x] job for 2 simple environments\n\n*Notes*:\n- jafar uses procgen: https://github.com/openai/procgen\n - can easily generate 16 different environments\n- gym, gym3, ALE (atari)\n- backwards compatibility was broken by gym in version 0.26.*\n - have to downgrade to 0.25.* for gym3 to work with gym\n- todo: might have to save the seed into the meta data??\n\n*Sources*:\nGym: https://gymnasium.farama.org/\nGymnax: https://github.com/RobertTLange/gymnax#implemented-accelerated-environments-%EF%B8%8F\nCraftax: https://github.com/MichaelTMatthews/Craftax\n\n\n## 20.05 Tuesday\n- [ ] write email for the kit cluster access\n- [ ] local hpc access\n- [ ] florian bauer macbook \n\n- [ ] look through code\n - [ ] how does the training work?\n - [ ] how is it different\n\n - [ ] Geld for SEA trip\n- [ ] Barmer\n\n\n## 19.05 Monday \n- [x] read through papers for DLM \n- [x] BA upload\n - [x] notes from below\n - [x] signation\n\nBA notes:\ntraining-time\ninference-time\nfine-tune\nfinetune\nllm \npython\nin context\nfigure/FIG references\ntransformation appendix??\n\n\n## 18.05 Sunday \n- [x] Email Meinel Lärmstuff\n- [x] Verivox email schreiben\n- [x] BA grammarly \n\n\n\n## 16.05 Friday\n- [x] request Helmholtz HPC access\n- [x] Horeka access\n- [x] Franz fragen \n - [x] setup reward distribution\n - [ ] setup genie\n- [x] bashscripts\n- [x] Befragung TUM RDI\n\n## BA\n- [x] Acknowledgements\n- [ ] Read requirements\n - [x] Tum logos on the front page?\n- [ ] Figures\n - [x] architecture\n - [x] inference strat\n - [x] computation to performance\n- [ ] Appendix?\n - [x] Comprehensive list of tranformations \n - [ ] exmaple prompt\n- [ ] Fig 4.1:\n - Say that we remove the test task and use one of the other tasks as test task\n\n- [x] Figure layout\n- [x] Conclusion put the results in there \n\n\n## Week in Hotze\nMisc:\n- [ ] Steuererklärung \n- [ ] jan wlan geld internet und paket\n- [ ] thanh geld 22 club\n- [ ] Britts rezension \n- [ ] Rezension ha giang\n- [ ] Befragung TUM RDI\n- [ ] Arzt termine\n - [ ] normal\n - [ ] zahn\n- [ ] Email Meinel Lärmstuff\n\n## 10.05.25 Saturday\n- [x] BA Figures\n\n\n## 05.05.25 Monday\n- [x] theo paypalen 86 Euro Amazon\n- [x] Email Isabelle Helmholz\n\n\n## DONE\n- [x] Dokument Helmholz\n- [x] stuff bestellen\n - [x] topper\n - [x] picknickdecke\n - [x] reis\n\n- [x] Tasse für PA \n\n\n## 26.03.25 Tuesday\n- [ ] Kreissparkasse\n- [x] Singapore arrival card\n- [ ] Barmer\n - [ ] impfung\n- [ ] Verivox\n- [ ] \n\n- [x] Packen\n- [x] Meds\n- [x] Müll\n- [x] Toilette\n- [x] Strom ausstecken\n- [x] Fenster schließen\n\n\n\n\n\n\n## 14.03.25 Friday\n\n- [x] Masterbwerbung random\n- [x] emails \n\n- [ ] methods\n\n\n## 12.03.25 Wednesday\nSidequests:\n- [ ] Perso\n- [ ] Führerschein\n- [ ] Barmer karte\n- [ ] hostel singapur\n- [ ] arbeiten\n- [ ] mac abgeben\n\nBA:\n- [ ] background section\n- [ ] methods section\n- [ ] evaluation pipeline\n - [ ] get the submission files\n - [ ] evaluate the results (easy, med, hard)\n - [ ] plot\n- [ ] ds/ep scaling for the tasks that where not solved\n- [ ]\n\n## 11.03.25 Tuesday\nHiwi:\n- [x] Application HIWI\n- [x] master bewerbung\n\nBA:\n- [x] anmeldung BA\n\n- [x] Sparkasse Rückzahlung\n\nSide Quests:\n- [x] paket zurückgeben\n- [x] telekom/verivox thingy\n- [x] master bewerbung\n- 
[x] jacke flicken\n- [x] arbeiten\n\n## 10.03.25 Monday\n\n## 08.03.25 Saturday\n\n\n## 05.03.25 Wednesday\n\n\n\n\n## 04.03.25 Tuesday\nCleanup:\n- [ ] Wäsche\n- [x] Zimmer random\n- [x] Küche random\n- [x] Staubsaugen\n- [ ] Fenster putzen\n- [ ] Badezimmer putzen\n\nGeld:\n- [ ] Impfung\n- [ ] Hose\n- [ ] Verivox \n\nWork:\n- [ ] Basti\n- [ ] Urlaubausbezahlen?\n- [ ] \n\nBA:\n- [ ] fix inference????\n- [ ] fix transformation???\n\n\n## 03.03.23\n- [x] run predictions for epoch scaling\n- [x] run predictions for epoch scaling\n\n## 02.03.23 Sunday\n- [ ] look for errors in adapter creation\n - [ ] some transformation error\n\n- [x] run the predictions\n - [x] vllm setup\n - [x] run predictions for epoch scaling\n - [x] run predictions for epoch scaling\n - [x] run predictions for epoch scaling\n\n- [x] evaluate epoch scaling batch 1\n\n## 01.03.25 Saturday\n- [x] Run ttt for the experiments\n - [x] scaling epoch batch 2\n - [x] scaling data \n - [x] scaling data + base llama\n - [x] scaling epoch batch 1\n- [x] run prediction for the experiments\n - [x] all the scripts setup the scripts\n- [x] notes background section\n\n\n## 27.02.25 Thursday:\nJuwels Cluster setup:\n- [x] \n\n\nHoreka Cluster setup:\n- [x] get the repo up an running \n - [x] vpn\n - [x] clone (repo clean up)\n - [x] environment\n - [x] scp the adapters?\n - [x] run the creation of the adapters from scratch\n - [x] run predict\n\n\nArzt: \n- [x] Impfung\n\n\n## 24.02.25 Monday\n- [x] start adapter generation \n- [x] debugging the max seq length problem\n- [x] move adapters to scratch?\n\n- [ ] ai fasttrack\n- [ ] mcp\n\n\n\n## 18.02.25 Tuesday\n- [] Laundry (Towels)\n- [x] Groceries\n\n- [ ] Slides holen \n - [ ] eist \n - [ ] gad\n - [ ] \n- [ ] Cleanup room (30min)\n\n\n## 17.02.25 Monday\nBA:\n- [x] start prediction job\n\nAP:\n- [ ] ap test\n\n## 16.02.25 Sunday\n- [x] analyze the training jsons\n - [x] min/max\n - [x] debug\n\n- [x] run ttt with 2k samples\n - [x] which training jsons\n - [x] where are the ttt adapters save\n\n- [x] test the training/predict piepline\n - [x] run predict with 4 processes (one per gpu)\n - [x] evaluation pipeline?\n - [x] only time for creating the all adapters\n - [x] how to measure inference time?\n - [x] how to measure training time?\n\n- [x] Fix transformations \n - [x] debug the one that are too big?\n - [x] make it work until 1000\n - [x] make the transformations more random\n - [x] look for other transformations\n - [x] plot all the ones under 500?\n\n- [x] buy pants\n\n## 12.02.25 Wednesday\n- [ ] \n\n\n## 11.02.25 Dienstag\n- [x] Stefan Bauer schreiben für meeting\n\n- [x] Plan für die nächsten 6 Woche\n - [x] Aptitude Test\n - [ ] Bachelor Arbeit\n - [ ] Arbeit\n\nI’ll be unavailable for an hour because of a doctor's appointment from 10:30 to 11:30 later\n\nVietnam\n- [x] mama anrufen wegen vietnam\n- [x] Impfugen vietnam egerndorfer\n- [x] singapur flüge \n- [x] yu ling fragen wegen referal\n\n## 10.02.25 Monday\n- [x] Table of contents für Bachelorarbeit\n- [x] Repo + Template für Bachelorarbeit\n- [x] Plan für die nächsten 6 Woche\n - [x] Aptitude Test\n - [ ] Bachelor Arbeit\n - [ ] Arbeit\n\n## 09.02.25 Sonntag\n- [x] PA\n- [x] Wäsche \n- [x] Putzen\n - [x] Fenster Küche\n - [x] Badezimmer\n - [x] Zimmer\n- [x] Plan für vietnam\n - [x] Mia\n - [x] Ben\n- [x] Machine Learning HA\n\n## 07.02.25 Friday\n- [x] Paper submisssion\n\n## 06.02.25 Thursday\n- [x] sparkasse rückzahlung email schreibn\n- [x] zusage test\n- [x] barmer geld \n - [x] sepa lastschrift\n\n### 
Estimating difficulty\nFor var, ent and std\n- [x] linear regression for the plot (with L1 and L2)\n- [x] ground truth line/regression\n\n- [x] loop through sample length and create plots\n- [x] create one combined plot of all the ablations\n- [x] later do the prm as well \n- [ ] buckets at the end with accuracy?\n\n\n- N: number of samples\n- exp for samples (2^n)\n- linear for seq_len (stepsize 2)\n- for var entr std\n- put all lin regressions in one plot\n\n### Metrics\n- L1 (ground truth)\n- L2 (ground truth)\n\nWhen done \n- Buckets + accuracy\n- per class accuracy\n\n## 04.02.25 Tuesday\n- [x] Deep eval for search\n - [x] generate golden\n - [x] convert .md to .txt\n - [x] run some tests\n\n- [x] geschenk für Joe\n- [x] nach muc\n- [x] arbeiten\n- [x] wäsche\n\n## 03.02.25 Monday\n- [x] Evaluate the results\n - [x] reinstall vllm\n- [x] aptitude test questioning\n\n\n## 02.02.25 Sunday\n- [x] Zweitsteuer \n- [x] ICE-ticket\n- [x] tum geld semesterbeitrag\n- [x] zulip eignungstest\n\n## 31.01.25 Friday\n- [x] might have to adapt the *.bin_{adapter_num} to *_{adapter_num}.bin\n\n## 30.01.25 Thursday\n- [x] fix error in predict.py\n- [x] create adapter per iteration\n - [x] every 10 iterations till 100\n - [x] every 40 iterations till 500\n - [x] every 100 iterations till 1000\n - [x] every 200 iterations till 2000\n\n- [x] setup per iteration checkpointing\n- [x] write down ideas\n\n## 29.01.25 Wednesday\n\n- [x] started jobs for one epoch\n\n\n## 28.01.25 Tuesday\n- [x] put the models stats in the output.json\n- [x] run predict on 2000k 1 epoch and 2000k 2 epoch\n\n\nHypothesis: \n- [ ] harder problem need more samples\n- [ ] peaks for the problems are at different points\n- [ ] add the peaks together\n- [ ] how to predict these peaks\n\n- how to estimate the peak beforehand?\n-> stefan bauer \n\n\n- [ ] epoch 1 \n- [ ] check pointed while traingin\n- [ ] how does test time traing affect the performance?\n\n- [ ] how does it affect \n - [ ] othter test time scaling\n - [ ] HER\n - [ ] sampling size\n - [ ] test time search\n - [ ] get the logic\n - [ ] how could longer ttt affect the performance\n - [ ] test time search methods\n\n- [ ] easy is solving other tasks\n- [ ] how good is grouping?\n- [ ] costs\n\n\n- [ ] estimate the difficulty differently?\n- [ ] how does this difficulty scale \n\n\nbefore doing anything:\n- questions\n- what do I have to do\n- what do I have to investigate\n- \n\n- [ ] von den einzelen tasks wie verhält sich das?\n- [ ]\n\n\n- [ ] make a road map of all the things I need to do\n- [ ] change the epochs and test for one \n - [ ] then run the same thing but with 2000 samples and checkpoint\n- [ ] run with deepseek distill\n- [ ] run with deepseek distill on transduction tasks\n\n- [ ] deploy r1\n\n\n- [ ] for the 30 \n- [ ] model chart with total solved tasks\n- [ ] change the lora adapters numbers\n- [ ] change the amount of transformations\n- [ ] change the type of transformations\n- [ ] what is the inference startegy rn?\n- [ ] \n\n\n- [ ] other methods of test time scaling?\n - [ ] cot on reasoning traces\n - [ ] more sampling\n - [ ] look for more\n\n- [ ] fix the gpu errors on some tasks\n- [ ] see if more transformations are needed\n- [ ] distilled models on hard tasks?\n- [ ] might need some reasoning cues....\n- [ ] mehr samplen for inference\n\n\n## 27.01.25 Monday\n- [x] saving results with wandb (redundant)\n - llm typically trained on 1 epoch\n - validation would be next token prediction\n - arc uses accuracy as only metric (free verification 
and thats the task)\n- [x] move adapters to one folder\n- [x] check how the training is going when changeing the learning set\n- [x] create a dataset of only the hard tasks\n- [x] run training on the hard tasks\n\n- [x] viusalize hard/easy tasks\n - [x] find out hard task (solved by <40% of the modes)\n- [ ] see if the model transformation is working\n - get the train data size into the task.csv as well\n## 26.01.25 Sunday\n- [x] find out the duplicates???\n\n## 25.01.25 Saturday\n- [x] multi job on one node \n- [x] find out which tasks are not solved\n - [x] list of solved/unsolved tasks\n\n## 23.01.25 Thursday\n- [x] chrissie schreiben für getränke\n- [x] und list für ne gruppe machen \n- [x] Franzi schreiben\n- [x] get multigpu training to run (hell nah they rewrote torchtune)\n- [x] get dataset for FT\n- [x] Cluster setup mihir\n- [x] get BARC setup \n - [x] barc ft\n - [x] barc adapters\n- [x] Mihir 5e\n- [x] Sushi geld\n\n## 21.01.25 Tuesday\n- [x] work\n- [x] food with ching chongs\n- [x] get the stats\n- [x] start all finetuning jobs\n\n\n## 20.01.25 Monday\n- [ ] Start some lora finetuning\n- [ ] write stefan bauer\n- [ ] \n\n## 19.01.25 Sunday\n- [ ] find out which one got solved\n - [ ] from baseline 250\n - [ ] from ours\n - [ ] get list of adapters\n - [ ] get list of solved tasks\n\n- [ ] create lora adapters for all sizes\n - [x] verify number of training samples\n - [x] verify number of successfully created adapters\n - [ ] start jobs for create adpaters for 10, 50, 100, 200, 500, 1000 tasks\n\n- [ ] run prediction on all adapters\n - [ ] 10 \n - [ ] verify stats: solved/not solved\n\n\n- [ ] fix the spiky behavior\n- [ ] clean up repo\n\n- [ ] spikey behavior\n\n- [ ] Wlan rechnung -> jan\n- [ ] train 1b model on 250 tasks each, if possible\n - [ ] get the adapters\n - [ ] run the predict\n - [ ] see which one are not solve\n - [ ] run a loop on [10, 50, 100, 200, 500, 1000]\n- [ ] putzen\n- [ ] ML hausuaufgaben\n\n\n## 18.01.25 Saturday \n- [x] einkauf rechnung\n- [x] rechnungen circula\n- [x] email lmu\n- [x] stefan bauer email für recommendation letter for datathon\n- [x] email osapiens\n- [ ] bewerbungen\n - [ ] aws\n - [ ] \n\n\n## 17.01.25 Friday\n- [x] Gym Beine\n- [x] Email draften for mehul\n- [x] linkedin stuff\n- [x] ramakant stuff\n- [x] email for tech support \n\n## 16.01.25 Thursday\n- [x] salloc 4h \n- [x] are we training on json?\n [ ] \n\n## 15.01.25 Wednesday\n- [x] arbeiten\n- [x] raum meetup\n- [ ] \n\n## 14.01.25 Tuesday\n- [x] Erasmus bewerbung\n- [x] Arbeiten\n- [x] Email stefan bauer (cluster access, helping with writing, test runs, i have 60000h no 6000h)\n- [x] Email eikyun\n\n\nHi Stefan, \n\nwäre es möglich Mihir noch Cluster access zu geben? Das würde uns rein vom setup und zusammen arbeiten mega viel Zeit ersparen. \nAußerdem ist mir aufgefallen, dass es sich um 60.000 und nicht um 6.000 cpu stunden handelt. Ich habe noch ca 58.000h übrig. Das sollte erstmal ausreichen denke ich.\nIch würde mich melden falls ich mehr brauche:)\n\nLG Alfred\n\nKurzes Update:\nIch habe jetzt über das Wochende ein paar inference Tasks mit den gegebenen Lora Adaptern laufen lassen. Gerade schreibe ich die Repo um für multi-gpu finetuning damit wir unsere eigenen Adaptere trainieren können. \nAm Freitag haben wir noch ein kurzes Meeting mit Mehul Daminan, für den adaptive compute part (https://arxiv.org/abs/2410.04707, er hat auch am TTT Paper mitgearbeitet). \n\nShort update:\nI was ran some inference tasks with the given Lora Adapters. 
I am currently rewriting the repo to support multi-gpu finetuning so we can train our own adapters.\nOn Friday we had a short meeting with Mehul Daminan, for the adaptive compute part (https://arxiv.org/abs/2410.04707, he also contributed to the TTT paper).\n\n\n## 13.01.25 Monday\n- [ ] how to generate 5-1000 new tasks?\n- [ ] how transformations work?\n- [ ] Ranking for erasmus\n\n\n## 11.01.25 Saturday\n- [x] Machine learning HA hochladen\n- [x] ttt repo run\n\n- [x] ask stefan for cluster access (mihir and franz)\n- [x] put avocadoaling on iphone\n\n\n- [ ] read paper\n - [ ] other ttt\n - [ ] self improvement\n\n- [x] Text stefan bauer\n - relative confident in idee, but might need supervision and tips : biweekly, help with writing, experiments, etc.\n\n- [x] write core hypothesis and experiments\n- [x] filtern for notes\n\n\n## 10.01.25 Friday\n\n- [x] get TTT to work\n\n- [x] Machine learning hausaufgabe\n\n- [x] read paper\n - [x] stefan bauer\n - [x] ttt arc\n\n- [x] ergebnisse vom alten projekt\n- [x] Stefan Bauer meeting\n - [x] eventuell hiwi\n - [x] idee schicken\n - [x] paper schicken\n - [x] is it ok to be two ppl for the project?\n - [x] hat er plan\n - [x] ob er jmd kennt der plan hat?\n - [x] iclr workshop\n - [x] ob er leute kennt die ahnung hat \n - [x] already texted the mit dudes\n - [x] scaling is ass with arc approaches\n- [x] get access to slack again\n- [x] reward hacking?\n\n## 09.01.25 Thursday\n- [x] write fabian \n- [x] handwerker\n- [x] ramakant\n- [x] presentation of semantic search\n- [x] telekom magenta eins\n- [x] barmer gek stuff\n- [x] clothes for washing tmrw\n- [x] read paper: how hard to think\n- [x] jonas essen \n\n## 08.01.25 Wednesday\n- [x] project idea machen zum doc einladen\n- [x] nachricht an stefan schreiben \n- [x] TTT repo installation setup \n- [x] arbeiten\n- [x] project idea first draft\n\n## 07.01.25 Tuesday\n- [x] merging done\n- [x] Gym\n- [x] Fenstersanierung\n\n## 06.01.25 Monday\n- [x] Gym\n\n- [x] caching and get some results\n\n- [ ] clean up room \n - [x] couch \n - [x] table\n - [x] floor\n - [x] kitchenstuff\n - [x] vacuum floor\n- [x] clean up kitchen\n - [x] dishes \n - [x] table\n - [x] vacuum floor\n- [x] clean up bathroom\n - [x] vacuum floor\n\n- [x] Complete test run with deep - not possible took too long\n- [x] medium - """"""\n- [x] for the teststats\n- [ ] add test time training in there for the tasks that were not solved\n\n- [ ] why 10 min between runs?\n- [ ] some tasks are way easier than others\n- [ ] setup for running background processes on the compute node\n- [ ] write a visualization for the results\n - [ ] intermediategrid representation?\n - [ ] store solutions/reasoning?\n - [ ] jupyter notebook for visualization\n- [ ] how are the solutions stored?\n- [ ] make one full test run and compare it to the og setup with claude\n - [ ] get the times for one run\n - [ ] get the time for the whole dataset\n - [ ] log it somewhere?\n - [ ] get the numbers for claude setup\n - [ ] get the numbers for qwen70b\n- [ ] play around with setup \n - [ ] differne models\n - [ ] depth\n - [ ] deep\n - [ ] medium\n - [ ] shallow\n - [ ] representation\n - [ ] look at the solved/unsolved ratio and tasks\n - [ ] any insights?\n- [ ] test out with smaller models\n - [ ] lamma7b\n - [ ] qwencoder 32b\n - [ ] ...\n - [ ] might be able to run them in parralle on one node\n - [ ] test out with finetuned models\n- [ ] finetune some model on it?\n- [ ] think about other approaches \n - [ ] natural language approach?\n - [ ] use the 
finetuned model of the winners\n - [ ] other generation schemes\n- [ ] get claude bedrock running\n- [ ] clean up\n\n## 05.01.25 Sunday\n- [x] gym\n- [x] send jan his datev letter\n- [x] return adidas stuff\n\n- [x] list for mom\n - [x] keyboard \n - [x] tshirt\n\n## 04.01.2025\n- [x] write update for stefan \n\n## 03.01.25 Friday\n\nMaiborn:\n- [x] get the circular app\n\nBachelor:\n- [ ] implement bedrock in the pipeline\n- [ ] one test run with claude\n- [ ] debug error??\n- [ ] figure out: intermediate results / tmp files\n - [ ] saving the python scripts somewhere not /tmp/...\n - [ ] manual testing of runnning python script\n - [ ] see what the prompts are getting from the representation\n\nBewerbung:\n- [ ] erasmus\n - [ ] bewerbung rausschicken\n- [ ] arbeit @aws @google @nvidia @other_big_tech @lab?\n\n\n## 02.01.25 Thursday\nMaiborn:\n- [x] Times\n- [x] Mobility Budget entry\n\nBachelor:\n- [x] setup aws account\n- [x] get bedrock running\n\nMisc: \n- [x] Sehtest Brille und brille kaufen\n- [x] email wandstreichen\n\n\n## 31.12.24 Monday\nBestellung:\n- [x] Schuhe \n- [x] Socken\n- [ ] \n\nSteuererklärung:\n- [ ] Antrag\n- [ ] App holen\n\nMaiborn:\n- [ ] Times\n- [ ] Mobility Budget\n- [ ] \n\nBachelor:\n- [x] setup aws account\n- [ ] one test run with claude\n- [ ] debug error??\n- [ ] figure out: intermediate results / tmp files\n - [ ] saving the python scripts somewhere not /tmp/...\n - [ ] manual testing of runnning python script\n - [ ] see what the prompts are getting from the representation\n\n\nMisc:\n- [ ] Wlan rechnung -> jan\n- [ ] termin für wandstreichen\n\nBewerbung:\n- [x] master bei lmu\n- [ ] erasmus\n - [x] info\n - [ ] bewerbung rausschicken\n- [ ] arbeit @aws @google @nvidia @other_big_tech @lab?\n\n\n\n## Monday \n- [ ] play around with:\n - [ ] cot\n - [ ] ascii representation\n - [ ] prompts\n - [ ] \n\n## Tuesday\n- [ ]\n\n\n## Wednesday\n- [ ] \n\n\n## Thursday\n- [x] change logging \n - [x] normal logger class?\n - [x] save to other format and then logfire?\n\n- [x] trying to run qwen72B -> too big bc of quantization?\n\n- [ ] download other models\n - [x] vision models are ass\n - [x] chat models\n\nGiven this iq test. What common transformation do these rows follow?\n\nI have this iq test for you. \nInstructions: \n\nGiven these pairs of grid patterns where each pair shows a 'before' (left) and 'after' (right) transformation, please:\n 1. Identify the rule that defines which how to transform the 'before' pattern into the 'after' pattern\n 2. provide the common transformation occurring to the geometric shapes\n 3. 
explain your reasoning on how you came to this conclusion\n\n\n- [x] issues fixing linebreak\n- [x] trying to run qwen coder 32B\n - [x] figuring out the output format\n- [x] figure out how to change tree structure\n\n\n## Friday \n- [ ] maybe use a smaller model?\n- [ ] finetune it on vision tasks\n- [ ] use the other finetune for generation?\n- [ ] self learning approach?\n- [ ] use the dsl and mix to create new stuff\n- [ ] see what kind of tasks can only be solve transductively\n- [ ] what did the top \n\n## Sunday \n- [x] telekom handyvertrag\n- [x] telekom wlan vertrag\n\n\nhttps://www.youtube.com/watch?v=WK5XYG-dH-k&list=PL0oJ2_Q2jPrfkO2Bo8ljN10cShkjkaWzr\n\nhttps://www.youtube.com/watch?v=3yQqNCOYfJo&list=PLbFBnggbJ1rnaXljgzhGm0p_Co9kwfs3t\n\nhttps://www.youtube.com/watch?v=bUudx1cPiAA&list=PLgENJ0iY3XBiJ0jZ53HT8v9Qa3cch7YEV\n\nhttps://www.youtube.com/watch?v=BsJJUAGoFBc&list=PLu0hRahvlQEahuFlF_Dc0_1AMmI_UCLz8\n\nhttps://www.youtube.com/watch?v=0PfLQkUBgcI&list=PL45ZeKlPnPvB6UGmZAJoH57ukARvRNZv3\n\nhttps://www.youtube.com/watch?v=ASfVaQH_1kI&list=PL0oJ2_Q2jPrdt6JFbZtTi8H_XwgYXVRfI\n\nhttps://www.youtube.com/watch?v=DM52HxaLK-Y&list=PLI-n-55RUT--saxVQngjQA3er4QXM67Mt\n\n## Ikea\n\nTotal: 375,78\n\nJan:\n- Bratpfanne Seong: 14,99€\nSum: 14,99€ \n\nAlfred: \n- Schwein 7,99€\n- Dröna Fach 3x1,99€=5,97€\nSum: 13,96€\n\nSplitting:\n375,78 - 14,99 - 13,96 = 346,83\n346,83 / 2 = 173,41\n\nTotal Alfred: 173,41 + 13,96 = 187,37€\nTotal Jan: 173,41 + 14,99 = 188,40€\n\n## SEA ben trip geld\n\n\nHostels:\nHa Long: 1,81 (11,60 total)\nHo Chi Minh: 2.76 (17,30 total)\nBangkok: 7.31 (45,94 total)\nKoh Samui: 4,02 (25,30 total)\n\nHanoi: \nLake View Hostel: 1.04.000d (37,39e)\nLake View Hostel: 130.000d (4,68e)\n\n\nHue:\nSecret Garden Hostel: 269.00d (9,53e)\nGrab: 149.00d (5,27e)\n1995s Hostel: 63.000d (2,18e)\nGrab: 33.000d (1,14e)\nGrab: 111.000d (3,87e)\nGrab: 113.000d (3,94e)\nGrab: 26.000d (0,91e)\n\nHoi An:\nHostel Scalo Villa: 535.000d (18.61e)\n\n\n\n\n\n\n\n## Go Asia\n\nBun: 1.99\n\n\n\n## Meeting Kungwoo\nBehaviour cloning:\n- How much data is needed to properly train the model?\n- \n \nOther gameplay datasets:\n- pubg dataset\n- biggest dataset\n- 1k hours\n\nData collection\n- bot for data collection\n- nvid\n\n- complexity \n- different worlds\n- not focusing on video games\n -> only use one game (depending on game)\n -> not many games \n\n\n\nTier 0:\nhttps://www.youtube.com/results?search_query=alle+meine+entchen+klavier\n\n\n\nI was asked to play piano for someones graduation ceremoy. 
How much money should I ask for?\nIn the past i got 200 euros for playing Fantasie Impromptu by Chopin and the Waldstein Sonata by Beethoven.\n\nMy current options would be:\nWinter Wind by Choping\nBallad No 4 by Chopin (too long)\nThe Wii Mii Theme\nTwinkle Twinkle Little Star\nAlle meine Entchen\nDepartures by Animenz\nGlimpse of Us arranged by Birru\n\nWhat are some other options that I could play and should I ask for less or more money depending on the piece?\nShould I make a tier list?\n\nBelow is a summary table with the YouTube search URLs for each piece, and an explanation of how you would approach getting the direct video URL if you had access to YouTube’s Data API or a scraping library like youtube-search-python.\n\n| **Tier** | **Piece** | **Composer / Arranger** | **Difficulty** | **Suggested Fee** | **URL** |\n| -------- | --------------------------------- | ----------------------- | -------------------------------------- | ----------------- | ------------------------------------------- |\n| SS | Ballade No. 4 | Chopin | Extremely demanding, long, expressive | 800 € | https://www.youtube.com/watch?v=pe-GrRQz8pk |\n| S | Winter Wind Étude (Op. 25 No. 11) | Chopin | Very fast, technically virtuosic | 600 € | https://www.youtube.com/watch?v=pHlqEvAwdVc |\n| A | Glimpse of Us | Birru (arr.) | Modern ballad, emotionally nuanced | 400 € | https://www.youtube.com/watch?v=Xe2Pr4omHMs |\n| A | Departures | Animenz (arr.) | Fast, cinematic anime arrangement | 400 € | https://www.youtube.com/watch?v=5hft807EJ6o |\n| A | Aeolian Harp Étude (Op. 25 No. 1) | Chopin | Flowing arpeggios, control and clarity | 400 € | https://www.youtube.com/watch?v=Ob0AQLp3a5s |\n| B | Clair de Lune | Debussy | Voicing finesse, sensitive rubato | 370 € | https://www.youtube.com/watch?v=U3u4pQ4WKOk |\n| C | Nocturne Op. 9 No. 2 | Chopin | Elegant phrasing, ornamentation | 300 € | https://www.youtube.com/watch?v=JVBzE0mUlSs |\n| D | Twinkle Twinkle Little Star | Traditional | Beginner level, simple melody | 15€ | https://www.youtube.com/watch?v=yCjJyiqpAuU |\n| E | Wii Mii Theme | Nintendo (arr.) | Nostalgic, light and rhythmic | 10€ | https://www.youtube.com/watch?v=xo8H6e2Afi4 |\n| F | Alle meine Entchen | Traditional | Basic children's song | 5€ | https://www.youtube.com/watch?v=5CDLwq8UgIE |\n\n\nPerfect! Here's how you could add Fantaisie-Impromptu in the A-tier with a fitting description:\n\nTier\tPiece\tComposer / Arranger\tDifficulty\tSuggested Fee\tURL\nA\tFantaisie-Impromptu (Op. 66)\tChopin\tFast, rhythmically complex, technically demanding but less intense than top Chopin Études\t450 €\thttps://www.youtube.com/watch?v=IvUU8joBb1Q\n\nIf you want, I can help you refine the difficulty wording or suggest a fee based on your current pricing scale!\n\n\n\n\n\n\n\n\n\n",markdown,tab
|
3 |
+
2,98,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
4 |
+
3,851,"TERMINAL",0,0,"python /Users/alfred/.cursor/extensions/ms-python.python-2025.6.1-darwin-arm64/python_files/printEnvVariablesToFile.py /Users/alfred/.cursor/extensions/ms-python.python-2025.6.1-darwin-arm64/python_files/deactivate/bash/envVars.txt",,terminal_command
|
5 |
+
4,867,"TERMINAL",0,0,"]633;E;python /Users/alfred/.cursor/extensions/ms-python.python-2025.6.1-darwin-arm64/python_files/printEnvVariablesToFile.py /Users/alfred/.cursor/extensions/ms-python.python-2025.6.1-darwin-arm64/python_files/deactivate/bash/envVars.txt;94f1541c-df11-407c-9963-13cd6466203f]633;C",,terminal_output
|
6 |
+
5,876,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"12:51:37 AM [info] Activating crowd-code\n12:51:37 AM [info] Welcome back alfred. Your user-id is '05d9d5da933137c5402a176a469b618685c7e9142aa8972616ca5cdf0f6e53d1'. Happy coding!\n12:51:37 AM [info] Recording started\n",Log,content
|
05d9d5da933137c5402a176a469b618685c7e9142aa8972616ca5cdf0f6e53d1/crowd-code-cb92c7b2-f6e4-4d49-91cb-88397630081c1750964172563-2025_06_26-20.56.24.104/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
05d9d5da933137c5402a176a469b618685c7e9142aa8972616ca5cdf0f6e53d1/crowd-code-cf821b01-6b29-41b7-8f83-b619919a05b21750839417809-2025_06_25-10.17.13.417/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
05d9d5da933137c5402a176a469b618685c7e9142aa8972616ca5cdf0f6e53d1/crowd-code-fe02aeb3-604a-4819-a48b-84d43ac5b72c1751037770876-2025_06_27-17.23.08.156/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-05830693-fec2-4daa-bf9a-df97d3f440b31752570818732-2025_07_15-11.14.44.738/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-062d67da-7ae5-4d1c-b9f1-a7d011641a591752908369905-2025_07_19-09.00.11.894/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-172d798b-bee8-455a-b904-9dd3fe6387d51754411154298-2025_08_05-18.25.56.221/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-1e500cb5-2752-4243-8002-15d1ebb40f691751798498690-2025_07_06-12.42.01.118/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-222fe98e-29ac-4b20-9a65-fe2e31f8eb701751128122769-2025_06_28-09.28.47.536/source.csv
ADDED
@@ -0,0 +1,29 @@
1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
2 |
+
1,2,"train_tokenizer.py",0,0,"from dataclasses import dataclass, field\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n checkpoint: str = """"\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 3e-4\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_tokenizer""\n tags: list[str] = field(default_factory=lambda: [""tokenizer""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\[email protected]\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n 
print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(\n entity=args.entity,\n project=args.project,\n name=args.name,\n tags=args.tags,\n group=""debug"",\n config=args\n )\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer_<timestamp>_<step>\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout = jax.random.split(rng, 3)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(videos=videos, rng=_rng, dropout_rng=_rng_dropout)\n start_time = time.time()\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n elapsed_time = (time.time() - start_time) * 1000\n print(f""Step {step}, loss: {loss}, step time: {elapsed_time}ms"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n ""step_time_ms"": elapsed_time,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = 
inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""tokenizer_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab
|
3 |
+
2,62,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
4 |
+
3,97,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"9:28:47 AM [info] Activating crowd-code\n9:28:47 AM [info] Recording started\n9:28:47 AM [info] Initializing git provider using file system watchers...\n9:28:47 AM [info] Git repository found\n9:28:47 AM [info] Git provider initialized successfully\n",Log,content
|
5 |
+
4,195,"extension-output-pdoom-org.crowd-code-#1-crowd-code",245,0,"9:28:47 AM [info] Initial git state: [object Object]\n",Log,content
|
6 |
+
5,109366,"train_tokenizer.py",0,0,"",python,tab
|
7 |
+
6,120114,"train_tokenizer.py",0,0,"Switched from branch 'dataloader-reproducibility-test' to 'main'",python,git_branch_checkout
|
8 |
+
7,135115,"train_tokenizer.py",6657,0,"",python,selection_mouse
|
9 |
+
8,135131,"train_tokenizer.py",6656,0,"",python,selection_command
|
10 |
+
9,584544,"utils/dataloader.py",0,0,"import functools\nimport jax\n\nimport tensorflow as tf\n\n# reserve GPU memory for JAX only if tensorflow is built with GPU support\ntf.config.experimental.set_visible_devices([], ""GPU"")\n\n\n# --- TensorFlow function for processing: slicing, normalization ---\ndef _tf_process_episode(episode_tensor, seq_len, image_h, image_w, image_c):\n """"""\n Processes a raw episode tensor in TensorFlow.\n Takes a full episode, extracts a random sequence, and normalizes it.\n Args:\n episode_tensor: A TensorFlow tensor representing a full video episode.\n Expected shape: (dynamic_length, image_h, image_w, image_c)\n Expected dtype: e.g., tf.uint8 (raw pixel values)\n seq_len: The desired length of the sub-sequence to extract.\n image_h: The height of each frame.\n image_w: The width of each frame.\n image_c: The number of channels in each frame.\n Returns:\n A TensorFlow tensor representing the processed video sequence.\n Shape: (seq_len, image_h, image_w, image_c)\n Dtype: tf.float32 (normalized pixel values)\n """"""\n current_episode_len = tf.shape(episode_tensor)[0]\n\n max_start_idx = current_episode_len - seq_len\n\n start_idx = tf.random.uniform(\n shape=(), minval=0, maxval=max_start_idx + 1, dtype=tf.int32\n )\n\n seq = episode_tensor[start_idx : start_idx + seq_len]\n\n seq = tf.cast(seq, tf.float32) / 255.0\n\n # Ensure the final shape is statically known for batching.\n # tf.reshape is robust, but tf.ensure_shape or set_shape can also be used if confident.\n processed_sequence = tf.reshape(seq, [seq_len, image_h, image_w, image_c])\n\n return processed_sequence\n\n\ndef _parse_tfrecord_fn(example_proto, image_h, image_w, image_c):\n feature_description = {\n ""height"": tf.io.FixedLenFeature([], tf.int64),\n ""width"": tf.io.FixedLenFeature([], tf.int64),\n ""channels"": tf.io.FixedLenFeature([], tf.int64),\n ""sequence_length"": tf.io.FixedLenFeature([], tf.int64),\n ""raw_video"": tf.io.FixedLenFeature([], tf.string),\n }\n example = tf.io.parse_single_example(example_proto, feature_description)\n\n video_shape = (example[""sequence_length""], image_h, image_w, image_c)\n\n episode_tensor = tf.io.decode_raw(example[""raw_video""], out_type=tf.uint8)\n episode_tensor = tf.reshape(episode_tensor, video_shape)\n\n episode_tensor = tf.ensure_shape(episode_tensor, [None, image_h, image_w, image_c])\n return episode_tensor\n\n\ndef get_dataloader(\n tfrecord_paths: list[str], # List of TFRecord file paths\n seq_len: int,\n global_batch_size: int,\n image_h: int,\n image_w: int,\n image_c: int,\n shuffle_buffer_size: int = 1000,\n num_parallel_calls: int = tf.data.AUTOTUNE,\n seed: int = 42,\n):\n """"""\n Creates a tf.data.Dataset pipeline from TFRecord files.\n """"""\n if not tfrecord_paths:\n raise ValueError(""tfrecord_paths list cannot be empty."")\n\n process_id = jax.process_index()\n num_processes = jax.process_count()\n\n assert (\n global_batch_size % num_processes == 0\n ), ""Global batch size {global_batch_size} \\n must be divisible by the number of JAX processes {num_processes} for proper sharding.""\n per_process_batch_size = global_batch_size // num_processes\n\n dataset = tf.data.TFRecordDataset(\n tfrecord_paths, num_parallel_reads=tf.data.AUTOTUNE\n )\n\n dataset = dataset.shard(num_shards=num_processes, index=process_id)\n\n # (f.srambical) NOTE: For TFRecords, it's often good to have a large shuffle buffer.\n if shuffle_buffer_size > 0:\n dataset = dataset.shuffle(\n buffer_size=shuffle_buffer_size, seed=seed, 
reshuffle_each_iteration=True\n )\n parse_fn = functools.partial(\n _parse_tfrecord_fn, image_h=image_h, image_w=image_w, image_c=image_c\n )\n dataset = dataset.map(parse_fn, num_parallel_calls=num_parallel_calls)\n\n tf_process_fn = functools.partial(\n _tf_process_episode,\n seq_len=seq_len,\n image_h=image_h,\n image_w=image_w,\n image_c=image_c,\n )\n dataset = dataset.map(tf_process_fn, num_parallel_calls=num_parallel_calls)\n\n dataset = dataset.repeat(None)\n dataset = dataset.batch(per_process_batch_size, drop_remainder=True)\n dataset = dataset.prefetch(tf.data.AUTOTUNE)\n\n return dataset.as_numpy_iterator()\n",python,tab
|
11 |
+
10,587078,"utils/dataloader.py",4313,0,"",python,selection_command
|
12 |
+
11,651295,"utils/dataloader.py",0,0,"",python,selection_command
|
13 |
+
12,652011,"utils/dataloader.py",17,0,"",python,selection_command
|
14 |
+
13,652136,"utils/dataloader.py",28,0,"",python,selection_command
|
15 |
+
14,652214,"utils/dataloader.py",29,0,"",python,selection_command
|
16 |
+
15,652563,"utils/dataloader.py",36,0,"",python,selection_command
|
17 |
+
16,655020,"utils/dataloader.py",4380,0,"",python,selection_command
|
18 |
+
17,655495,"utils/dataloader.py",4341,0,"",python,selection_command
|
19 |
+
18,655578,"utils/dataloader.py",4345,0,"",python,selection_command
|
20 |
+
19,655774,"utils/dataloader.py",4352,0,"",python,selection_command
|
21 |
+
20,655989,"utils/dataloader.py",4359,0,"",python,selection_command
|
22 |
+
21,656093,"utils/dataloader.py",4340,0,"",python,selection_command
|
23 |
+
22,656226,"utils/dataloader.py",4345,0,"",python,selection_command
|
24 |
+
23,656410,"utils/dataloader.py",4340,0,"",python,selection_command
|
25 |
+
24,656615,"utils/dataloader.py",4295,0,"",python,selection_command
|
26 |
+
25,656753,"utils/dataloader.py",4303,0,"",python,selection_command
|
27 |
+
26,656931,"utils/dataloader.py",4305,0,"",python,selection_command
|
28 |
+
27,657095,"utils/dataloader.py",4312,0,"",python,selection_command
|
29 |
+
28,657232,"utils/dataloader.py",4313,0,"",python,selection_command
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-26402469-72d0-4ef2-8121-ec0cf7ef56091752689884853-2025_07_16-20.18.54.768/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-280dfe9f-0f96-4f1d-a304-44e1237157a81754289117685-2025_08_04-08.32.51.461/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-2bb200ce-4bc8-4bc3-9354-29e24db5d38e1752063967983-2025_07_09-14.26.42.463/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-2c44fb8b-ed49-4624-90ac-19d20bbba3331752833225757-2025_07_18-12.07.47.556/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-2f4ff312-abac-4732-805a-ec6c245e532e1752745206273-2025_07_17-11.40.45.491/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-33e2e492-00ca-4b0f-8523-da20187ea5301751307587112-2025_06_30-20.21.41.519/source.csv
ADDED
@@ -0,0 +1,53 @@
1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
2 |
+
1,12,"utils/dataloader.py",0,0,"import jax\nimport numpy as np\nimport grain\nfrom typing import Any\n\n\n# Note on Data Format:\n# Grain's preferred file format is ArrayRecord. This implementation assumes you\n# have converted your TFRecord files to ArrayRecord files. A simple script\n# using `tf.data` to read TFRecords and `grain.ArrayRecordWriter` to write\n# them can perform this one-time conversion. Each record in the ArrayRecord file\n# is expected to be a dictionary with keys like ""raw_video"" and ""sequence_length"",\n# mirroring the structure of the original TFRecord features.\n\n\nclass ProcessEpisodeAndSlice(grain.transforms.RandomMap):\n """"""\n A Grain Transformation that combines parsing, slicing, and normalizing.\n\n This class replaces the functionality of both `_parse_tfrecord_fn` and\n `_tf_process_episode` from the original tf.data pipeline. It operates on\n a single data record (episode).\n """"""\n\n def __init__(self, seq_len: int, image_h: int, image_w: int, image_c: int):\n """"""Initializes the transformation with processing parameters.""""""\n self.seq_len = seq_len\n self.image_h = image_h\n self.image_w = image_w\n self.image_c = image_c\n\n def random_map(self, element: dict, rng: np.random.Generator) -> Any:\n """"""\n Processes a single raw episode from the data source.\n\n Args:\n element: A dictionary representing one record from the DataSource.\n Expected to contain 'raw_video' (bytes), and\n 'sequence_length' (int).\n rng: A per-record random number generator provided by the Grain sampler.\n\n Returns:\n A processed video sequence as a NumPy array with shape\n (seq_len, image_h, image_w, image_c) and dtype float32.\n """"""\n # Step 1: Parse and reshape the raw video data (replaces _parse_tfrecord_fn)\n # We assume the element from the data source is a dict.\n video_shape = (\n element[""sequence_length""],\n self.image_h,\n self.image_w,\n self.image_c,\n )\n episode_tensor = np.frombuffer(element[""raw_video""], dtype=np.uint8)\n episode_tensor = episode_tensor.reshape(video_shape)\n\n # Step 2: Randomly slice and normalize (replaces _tf_process_episode)\n current_episode_len = episode_tensor.shape[0]\n if current_episode_len < self.seq_len:\n raise ValueError(f""An episode has length {current_episode_len}, which is ""\n f""shorter than the requested sequence length {self.seq_len}."")\n \n max_start_idx = current_episode_len - self.seq_len\n \n # Use the provided Grain RNG for deterministic randomness.\n start_idx = rng.integers(0, max_start_idx + 1)\n\n seq = episode_tensor[start_idx : start_idx + self.seq_len]\n\n # Normalize to [0, 1]\n processed_sequence = seq.astype(np.float32) / 255.0\n\n return processed_sequence\n\n\ndef get_dataloader_grain(\n array_record_paths: list[str], # List of ArrayRecord file paths\n seq_len: int,\n global_batch_size: int,\n image_h: int,\n image_w: int,\n image_c: int,\n # shuffle_buffer_size is not needed; Grain shuffles all indices globally.\n num_workers: int = 4, # Replaces num_parallel_calls\n seed: int = 42,\n):\n """"""\n Creates a data loading pipeline using Google Grain.\n\n This function sets up a data source, a sampler for sharding/shuffling,\n and a sequence of operations (transformations) to be applied to the data.\n """"""\n if not array_record_paths:\n raise ValueError(""array_record_paths list cannot be empty."")\n\n process_id = jax.process_index()\n num_processes = jax.process_count()\n\n if global_batch_size % num_processes != 0:\n raise ValueError(\n f""Global batch size {global_batch_size} must 
be divisible by ""\n f""the number of JAX processes {num_processes} for proper sharding.""\n )\n per_process_batch_size = global_batch_size // num_processes\n\n # 1. DataSource: Reads records from ArrayRecord files.\n # This replaces `tf.data.TFRecordDataset`.\n source = grain.sources.ArrayRecordDataSource(array_record_paths)\n \n # 2. Sampler: Defines the order of access, sharding, shuffling, and epochs.\n # This replaces `dataset.shard`, `dataset.shuffle`, and `dataset.repeat`.\n shard_options = grain.sharding.ShardOptions(\n shard_index=process_id, shard_count=num_processes, drop_remainder=True\n )\n\n sampler = grain.samplers.IndexSampler(\n num_records=len(source),\n shard_options=shard_options,\n seed=seed,\n num_epochs=None, # `None` means repeat indefinitely, like `tf.data.repeat(None)`.\n shuffle=True, # Enables global shuffling of all record indices.\n )\n\n # 3. Operations: A list of transformations to apply to each record.\n # This replaces the sequence of `.map()` calls.\n operations = [\n ProcessEpisodeAndSlice(\n seq_len=seq_len, image_h=image_h, image_w=image_w, image_c=image_c\n ),\n grain.transforms.Batch(batch_size=per_process_batch_size, drop_remainder=True),\n ]\n\n # 4. DataLoader: The main entry point that orchestrates the pipeline.\n # It manages worker processes for parallel data loading and transformation,\n # replacing `num_parallel_calls` and `.prefetch()`.\n dataloader = grain.DataLoader(\n data_source=source,\n sampler=sampler,\n operations=operations,\n worker_count=num_workers,\n )\n\n # 5. Return an iterator, which is the standard way to consume a Grain DataLoader.\n return iter(dataloader)",python,tab
|
3 |
+
2,45577,"utils/dataloader.py",1756,0,"",python,selection_mouse
|
4 |
+
3,45581,"utils/dataloader.py",1755,0,"",python,selection_command
|
5 |
+
4,47836,"utils/dataloader.py",0,0,"",python,selection_command
|
6 |
+
5,135562,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"8:20:34 PM [info] Activating crowd-code\n8:20:34 PM [info] Initializing git provider using file system watchers...\n8:20:34 PM [error] Autostart recording failed unexpectedly: Error: EACCES: permission denied, mkdir '/scratch/crowd-code/crowd-code-33e2e492-00ca-4b0f-8523-da20187ea5301751307587112-2025_06_30-20.20.34.639'\n8:20:34 PM [info] Git repository found\n8:20:34 PM [info] Git provider initialized successfully\n8:20:34 PM [info] Initial git state: [object Object]\n8:21:41 PM [info] Recording started\n",Log,tab
|
7 |
+
6,137827,"utils/dataloader.py",0,0,"",python,tab
|
8 |
+
7,137833,"TERMINAL",0,0,"",,terminal_focus
|
9 |
+
8,168423,"utils/dataloader.py",11,0,"",python,selection_command
|
10 |
+
9,168670,"utils/dataloader.py",30,0,"",python,selection_command
|
11 |
+
10,168680,"utils/dataloader.py",43,0,"",python,selection_command
|
12 |
+
11,168713,"utils/dataloader.py",66,0,"",python,selection_command
|
13 |
+
12,168900,"utils/dataloader.py",67,0,"",python,selection_command
|
14 |
+
13,169071,"utils/dataloader.py",68,0,"",python,selection_command
|
15 |
+
14,170086,"utils/dataloader.py",68,22,"# Note on Data Format:",python,selection_command
|
16 |
+
15,170312,"utils/dataloader.py",68,102,"# Note on Data Format:\n# Grain's preferred file format is ArrayRecord. This implementation assumes you",python,selection_command
|
17 |
+
16,170441,"utils/dataloader.py",68,177,"# Note on Data Format:\n# Grain's preferred file format is ArrayRecord. This implementation assumes you\n# have converted your TFRecord files to ArrayRecord files. A simple script",python,selection_command
|
18 |
+
17,170593,"utils/dataloader.py",68,252,"# Note on Data Format:\n# Grain's preferred file format is ArrayRecord. This implementation assumes you\n# have converted your TFRecord files to ArrayRecord files. A simple script\n# using `tf.data` to read TFRecords and `grain.ArrayRecordWriter` to write",python,selection_command
|
19 |
+
18,170709,"utils/dataloader.py",68,333,"# Note on Data Format:\n# Grain's preferred file format is ArrayRecord. This implementation assumes you\n# have converted your TFRecord files to ArrayRecord files. A simple script\n# using `tf.data` to read TFRecords and `grain.ArrayRecordWriter` to write\n# them can perform this one-time conversion. Each record in the ArrayRecord file",python,selection_command
|
20 |
+
19,170849,"utils/dataloader.py",68,416,"# Note on Data Format:\n# Grain's preferred file format is ArrayRecord. This implementation assumes you\n# have converted your TFRecord files to ArrayRecord files. A simple script\n# using `tf.data` to read TFRecords and `grain.ArrayRecordWriter` to write\n# them can perform this one-time conversion. Each record in the ArrayRecord file\n# is expected to be a dictionary with keys like ""raw_video"" and ""sequence_length"",",python,selection_command
|
21 |
+
20,171018,"utils/dataloader.py",68,477,"# Note on Data Format:\n# Grain's preferred file format is ArrayRecord. This implementation assumes you\n# have converted your TFRecord files to ArrayRecord files. A simple script\n# using `tf.data` to read TFRecords and `grain.ArrayRecordWriter` to write\n# them can perform this one-time conversion. Each record in the ArrayRecord file\n# is expected to be a dictionary with keys like ""raw_video"" and ""sequence_length"",\n# mirroring the structure of the original TFRecord features.",python,selection_command
|
22 |
+
21,176574,"utils/dataloader.py",485,0,"",python,selection_command
|
23 |
+
22,394785,"utils/dataloader.py",547,0,"",python,selection_mouse
|
24 |
+
23,395482,"utils/dataloader.py",5621,0,"",python,selection_command
|
25 |
+
24,397214,"utils/dataloader.py",5644,0,"\n ",python,content
|
26 |
+
25,397428,"utils/dataloader.py",5645,4,"",python,content
|
27 |
+
26,397654,"utils/dataloader.py",5645,0,"\n",python,content
|
28 |
+
27,398149,"utils/dataloader.py",5646,0,"c",python,content
|
29 |
+
28,398150,"utils/dataloader.py",5647,0,"",python,selection_keyboard
|
30 |
+
29,398263,"utils/dataloader.py",5647,0,"o",python,content
|
31 |
+
30,398264,"utils/dataloader.py",5648,0,"",python,selection_keyboard
|
32 |
+
31,398372,"utils/dataloader.py",5648,0,"v",python,content
|
33 |
+
32,398373,"utils/dataloader.py",5649,0,"",python,selection_keyboard
|
34 |
+
33,398377,"utils/dataloader.py",5649,0,"n",python,content
|
35 |
+
34,398378,"utils/dataloader.py",5650,0,"",python,selection_keyboard
|
36 |
+
35,398553,"utils/dataloader.py",5650,0,"e",python,content
|
37 |
+
36,398554,"utils/dataloader.py",5651,0,"",python,selection_keyboard
|
38 |
+
37,398623,"utils/dataloader.py",5651,0,"r",python,content
|
39 |
+
38,398624,"utils/dataloader.py",5652,0,"",python,selection_keyboard
|
40 |
+
39,398935,"utils/dataloader.py",5651,1,"",python,content
|
41 |
+
40,399061,"utils/dataloader.py",5650,1,"",python,content
|
42 |
+
41,399196,"utils/dataloader.py",5649,1,"",python,content
|
43 |
+
42,399325,"utils/dataloader.py",5648,1,"",python,content
|
44 |
+
43,399467,"utils/dataloader.py",5647,1,"",python,content
|
45 |
+
44,399701,"utils/dataloader.py",5646,1,"",python,content
|
46 |
+
45,446238,"utils/dataloader.py",5458,0,"",python,selection_mouse
|
47 |
+
46,446245,"utils/dataloader.py",5457,0,"",python,selection_command
|
48 |
+
47,446480,"utils/dataloader.py",0,0,"",python,selection_command
|
49 |
+
48,472031,"utils/dataloader.py",691,0,"class ProcessEpisodeAndSlice(grain.transforms.RandomMap):\n",python,content
|
50 |
+
49,472032,"utils/dataloader.py",614,75," Converts TFRecord files to ArrayRecord format for use with Grain.\n \n Args:\n tfrecord_folder: Path to folder containing TFRecord files\n output_folder: Path to output folder for ArrayRecord files\n records_per_file: Number of records to write per ArrayRecord file\n feature_description: Dictionary describing TFRecord features. If None,\n uses default description for video data.\n \n Returns:\n List of paths to created ArrayRecord files\n """"""\n if feature_description is None:\n # Default feature description for video data\n feature_description = {\n 'raw_video': tf.io.FixedLenFeature([], tf.string),\n 'sequence_length': tf.io.FixedLenFeature([], tf.int64),\n }\n \n # Create output directory if it doesn't exist\n os.makedirs(output_folder, exist_ok=True)\n \n # Find all TFRecord files\n tfrecord_files = list(Path(tfrecord_folder).glob(""*.tfrecord""))\n if not tfrecord_files:\n raise ValueError(f""No TFRecord files found in {tfrecord_folder}"")\n \n print(f""Found {len(tfrecord_files)} TFRecord files"")\n \n # Create dataset from all TFRecord files\n dataset = tf.data.TFRecordDataset([str(f) for f in tfrecord_files])\n \n def parse_tfrecord(example_proto):\n """"""Parse a single TFRecord example.""""""\n parsed_features = tf.io.parse_single_example(example_proto, feature_description)\n return {\n 'raw_video': parsed_features['raw_video'].numpy(),\n 'sequence_length': int(parsed_features['sequence_length'].numpy())\n }\n \n # Process records and write to ArrayRecord files\n arrayrecord_files = []\n record_count = 0\n file_index = 0\n \n current_writer = None\n \n for record in dataset:\n parsed_record = parse_tfrecord(record)\n \n # Create new writer if needed\n if current_writer is None:\n output_file = os.path.join(output_folder, f""data_{file_index:04d}.arrayrecord"")\n current_writer = grain.ArrayRecordWriter(output_file)\n arrayrecord_files.append(output_file)\n \n # Write record\n current_writer.write(parsed_record)\n record_count += 1\n \n # Close current file and start new one if we've reached the limit\n if record_count % records_per_file == 0:\n current_writer.close()\n current_writer = None\n file_index += 1\n print(f""Created ArrayRecord file {file_index}: {record_count} records processed"")\n \n # Close final writer if it exists\n if current_writer is not None:\n current_writer.close()\n print(f""Created final ArrayRecord file: {record_count} total records processed"")\n \n print(f""Conversion complete! Created {len(arrayrecord_files)} ArrayRecord files"")\n return arrayrecord_files\n",python,content
|
51 |
+
50,472032,"utils/dataloader.py",548,57,"def convert_tfrecords_to_arrayrecords(\n tfrecord_folder: str,\n output_folder: str,\n records_per_file: int = 1000,\n feature_description: dict = None,\n):",python,content
|
52 |
+
51,472032,"utils/dataloader.py",66,0,"import tensorflow as tf\nimport os\nfrom pathlib import Path\n",python,content
|
53 |
+
52,472226,"utils/dataloader.py",3723,0," """"""\n A Grain Transformation that combines parsing, slicing, and normalizing.\n\n",python,content
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-3d995f1f-4c62-4f98-bebc-a37f170682391753352025448-2025_07_24-12.14.24.225/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-3dde1b0c-c963-467e-aa73-fb6c54df3ae41751963426964-2025_07_08-10.30.57.271/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-3e0d9e2e-4ec4-4d1f-bec7-d4425f5cfe1a1751911970182-2025_07_07-20.13.17.83/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-3f1ee1c9-e4cc-4ce6-b748-f84ba0479b8d1752061696547-2025_07_09-13.48.48.273/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-40a031fc-4dc5-4408-a821-8344d9146d941752833855790-2025_07_18-12.18.25.998/source.csv
ADDED
@@ -0,0 +1,115 @@
1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
2 |
+
1,9,"train_tokenizer.py",0,0,"from dataclasses import dataclass, field\nimport os\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n init_lr: float = 0.0\n max_lr: float = 3e-4\n decay_end: float = 0.0\n wsd_decay_steps: int = 20000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n lr_schedule: str = ""wsd"" # supported options: wsd, cos \n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n ffn_dim: int = 2048\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 4\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n param_dtype: jnp.dtype = jnp.float32\n dtype: jnp.dtype = jnp.bfloat16\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_tokenizer""\n tags: list[str] = field(default_factory=lambda: [""tokenizer""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n use_flash_attention: bool = True\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n # FIXME (f.srambical): Can we even do native int8 training without casting the video at all?\n # FIXME (f.srambical): If the tokenizer is the reason for the dynamics model being memory-bound,\n # should we at least train the tokenizer natively in int8?\n inputs[""videos""] = inputs[""videos""].astype(args.dtype) / 255.0\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean() # type: ignore\n ssim = pix.ssim(gt, recon).mean() # type: ignore\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\[email 
protected]\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n ffn_dim=args.ffn_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=args.dtype,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n param_counts = count_parameters_by_component(init_params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(args.lr_schedule, \n args.init_lr, \n args.max_lr, \n args.decay_end, \n args.num_steps, \n args.warmup_steps, \n args.wsd_decay_steps)\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4, mu_dtype=args.dtype)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.StandardSave, ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.StandardRestore, 
ocp.handlers.StandardCheckpointHandler\n )\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointSave, grain.checkpoint.CheckpointHandler) # type: ignore\n handler_registry.add(""dataloader_state"", grain.checkpoint.CheckpointRestore, grain.checkpoint.CheckpointHandler) # type: ignore\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n abstract_train_state = jax.tree_util.tree_map(\n ocp.utils.to_shape_dtype_struct, train_state\n )\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.StandardRestore(abstract_train_state),\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator),\n ),\n )\n train_state = restored[""model_state""]\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n\n # --- TRAIN LOOP ---\n dataloader = (jax.make_array_from_process_local_data(videos_sharding, elem) for elem in grain_iterator) # type: ignore\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout = jax.random.split(rng, 3)\n\n inputs = dict(videos=videos, rng=_rng, dropout_rng=_rng_dropout)\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n 
model_state=ocp.args.StandardSave(train_state),\n dataloader_state=grain.checkpoint.CheckpointSave(\n grain_iterator\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab
|
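A minimal sketch of the sharding scheme used in the train_tokenizer.py excerpt above: a 1-D "data" mesh splits only the leading batch axis of the video tensors across devices. The batch shape, uint8 dtype, and the factor of 2 per device are illustrative assumptions, not values from the recording.

import jax
import jax.numpy as jnp
from jax.experimental.mesh_utils import create_device_mesh
from jax.sharding import Mesh, NamedSharding, PartitionSpec

num_devices = jax.device_count()
mesh = Mesh(create_device_mesh((num_devices,)), axis_names=("data",))
# Shard only the leading (batch) axis; time, height, width and channels stay replicated.
videos_sharding = NamedSharding(mesh, PartitionSpec("data", None, None, None, None))

videos = jnp.zeros((num_devices * 2, 16, 90, 160, 3), dtype=jnp.uint8)  # hypothetical global batch
videos = jax.device_put(videos, videos_sharding)
print(videos.sharding)  # each device now holds a (2, 16, 90, 160, 3) shard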
3 |
+
2,1135,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"12:18:25 PM [info] Activating crowd-code\n12:18:26 PM [info] Recording started\n12:18:26 PM [info] Initializing git provider using file system watchers...\n",Log,tab
|
4 |
+
3,1216,"extension-output-pdoom-org.crowd-code-#1-crowd-code",153,0,"12:18:26 PM [info] Git repository found\n12:18:26 PM [info] Git provider initialized successfully\n12:18:26 PM [info] Initial git state: [object Object]\n",Log,content
|
5 |
+
4,30846,"train_tokenizer.py",0,0,"",python,tab
|
6 |
+
5,35005,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=2\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=24:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_franz/big-runs/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_franz/big-runs/%x_%j.log\n#SBATCH --job-name=train_tokenizer_lr_sweep_1e-4_larger_ffn\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$PWD/checkpoints/$job_name\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --restore_ckpt \\n --save_ckpt \\n --batch_size=96 \\n --ffn_dim=2048 \\n --num_blocks=4 \\n --init_lr=0 \\n --max_lr=1e-4 \\n --decay_end=0.0 \\n --wsd_decay_steps=10000 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=1000 \\n --log \\n --name=mixed-precision-tokenizer-lr-1e-4-larger-ffn \\n --tags tokenizer mixed-precision 1e-4 mixed-precision flash-attention larger-ffn \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir",shellscript,tab
|
7 |
+
6,42115,"train_tokenizer.py",0,0,"",python,tab
|
8 |
+
7,42747,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",0,0,"",shellscript,tab
|
9 |
+
8,43131,"train_tokenizer.py",0,0,"",python,tab
|
10 |
+
9,44712,"train_tokenizer.py",671,0,"",python,selection_command
|
11 |
+
10,45149,"train_tokenizer.py",6700,0,"",python,selection_command
|
12 |
+
11,45633,"train_tokenizer.py",6649,0,"",python,selection_command
|
13 |
+
12,45821,"train_tokenizer.py",6601,0,"",python,selection_command
|
14 |
+
13,45946,"train_tokenizer.py",6552,0,"",python,selection_command
|
15 |
+
14,46061,"train_tokenizer.py",6499,0,"",python,selection_command
|
16 |
+
15,46276,"train_tokenizer.py",6498,0,"",python,selection_command
|
17 |
+
16,46419,"train_tokenizer.py",6494,0,"",python,selection_command
|
18 |
+
17,46571,"train_tokenizer.py",6493,0,"",python,selection_command
|
19 |
+
18,46738,"train_tokenizer.py",6478,0,"",python,selection_command
|
20 |
+
19,47562,"utils/lr_utils.py",0,0,"import optax\n\ndef get_lr_schedule(lr_schedule: str, init_lr: float, max_lr: float, decay_end: float, total_steps: int, warmup_steps: int, wsd_decay_steps: int) -> optax.Schedule:\n supported_schedules = [""wsd"", ""cos""]\n if lr_schedule == ""cos"":\n assert warmup_steps <= total_steps, ""Warmup steps can't be greater than total steps.""\n return optax.warmup_cosine_decay_schedule(\n init_value=init_lr,\n peak_value=max_lr,\n warmup_steps=warmup_steps,\n decay_steps=total_steps, # Note: decay_steps includes the warmup steps, so we need to pass total value\n end_value=decay_end \n )\n elif lr_schedule == ""wsd"":\n assert warmup_steps + wsd_decay_steps <= total_steps, ""Warmup and decay period is longer than total steps.""\n schedules = [\n optax.linear_schedule(init_value=init_lr, end_value=max_lr, transition_steps=warmup_steps),\n optax.constant_schedule(value=max_lr),\n optax.linear_schedule(init_value=max_lr, end_value=decay_end, transition_steps=wsd_decay_steps),\n ]\n boundaries = [warmup_steps, total_steps - wsd_decay_steps]\n return optax.join_schedules(schedules, boundaries)\n else:\n raise ValueError(f""Learning rate schedule not supported. Please use one of {supported_schedules}"")",python,tab
|
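A short usage sketch of the get_lr_schedule helper shown in utils/lr_utils.py above. The peak learning rate and wsd_decay_steps mirror the flags in the sbatch script; total_steps and warmup_steps are illustrative assumptions.

from utils.lr_utils import get_lr_schedule  # module path as recorded above

schedule = get_lr_schedule(
    lr_schedule="wsd",
    init_lr=0.0,
    max_lr=1e-4,             # matches --max_lr in the experiment script
    decay_end=0.0,
    total_steps=100_000,     # illustrative; use the run's --num_steps
    warmup_steps=10_000,     # illustrative
    wsd_decay_steps=10_000,  # matches --wsd_decay_steps
)
# Linear warmup until step 10_000, constant at 1e-4 until 90_000, then linear decay to 0.
for step in (0, 10_000, 50_000, 95_000, 100_000):
    print(step, float(schedule(step)))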
21 |
+
20,47565,"utils/lr_utils.py",18,0,"",python,selection_command
|
22 |
+
21,52015,"utils/lr_utils.py",101,0,"",python,selection_command
|
23 |
+
22,52334,"utils/lr_utils.py",280,0,"",python,selection_command
|
24 |
+
23,56412,"utils/lr_utils.py",520,0,"",python,selection_command
|
25 |
+
24,57652,"utils/lr_utils.py",734,0,"",python,selection_command
|
26 |
+
25,58689,"utils/lr_utils.py",1133,0,"",python,selection_command
|
27 |
+
26,90985,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",0,0,"",shellscript,tab
|
28 |
+
27,93922,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1085,0,"\n ",shellscript,content
|
29 |
+
28,94205,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1090,0,"-",shellscript,content
|
30 |
+
29,94206,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1091,0,"",shellscript,selection_keyboard
|
31 |
+
30,94366,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1091,0,"-",shellscript,content
|
32 |
+
31,94367,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1092,0,"",shellscript,selection_keyboard
|
33 |
+
32,94532,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1092,0,"n",shellscript,content
|
34 |
+
33,94533,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1093,0,"",shellscript,selection_keyboard
|
35 |
+
34,94619,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1093,0,"u",shellscript,content
|
36 |
+
35,94620,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1094,0,"",shellscript,selection_keyboard
|
37 |
+
36,94800,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1094,0,"m",shellscript,content
|
38 |
+
37,94801,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1095,0,"",shellscript,selection_keyboard
|
39 |
+
38,95807,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1095,0,"_steps=100000 \",shellscript,content
|
40 |
+
39,96089,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1109,0,"",shellscript,selection_command
|
41 |
+
40,96852,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1108,0,"",shellscript,selection_command
|
42 |
+
41,97091,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1107,0,"",shellscript,selection_command
|
43 |
+
42,97093,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1106,0,"",shellscript,selection_command
|
44 |
+
43,97133,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1105,0,"",shellscript,selection_command
|
45 |
+
44,97331,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1104,0,"",shellscript,selection_command
|
46 |
+
45,97520,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1103,0,"",shellscript,selection_command
|
47 |
+
46,97684,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1102,0,"",shellscript,selection_command
|
48 |
+
47,98641,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1102,6,"",shellscript,content
|
49 |
+
48,100813,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1102,0,"5",shellscript,content
|
50 |
+
49,100814,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1103,0,"",shellscript,selection_keyboard
|
51 |
+
50,100825,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1103,0,"3",shellscript,content
|
52 |
+
51,100826,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1104,0,"",shellscript,selection_keyboard
|
53 |
+
52,101325,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1104,0,"0",shellscript,content
|
54 |
+
53,101325,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1105,0,"",shellscript,selection_keyboard
|
55 |
+
54,101496,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1105,0,"0",shellscript,content
|
56 |
+
55,101498,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1106,0,"",shellscript,selection_keyboard
|
57 |
+
56,101663,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1106,0,"0",shellscript,content
|
58 |
+
57,101664,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1107,0,"",shellscript,selection_keyboard
|
59 |
+
58,101893,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",1106,0,"",shellscript,selection_command
|
60 |
+
59,113445,"TERMINAL",0,0,"sbatch experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",,terminal_command
|
61 |
+
60,113502,"TERMINAL",0,0,"]633;CSubmitted batch job 3357211\r\n]0;tum_dbd0378@hkn1993:~/jafar",,terminal_output
|
62 |
+
61,115740,"TERMINAL",0,0,"queue",,terminal_command
|
63 |
+
62,115809,"TERMINAL",0,0,"]633;C",,terminal_output
|
64 |
+
63,115909,"TERMINAL",0,0,"[?1049h[22;0;0t[1;58r(B[m[4l[?7h[H[2J[1;14Hhkn1993.localdomain: Fri Jul 18 12:20:21 2025[3;14HJOBID PARTITION NAME USER ST\tTI[4;1HME NODES NODELIST(REASON)[5;12H3357211 accelerat train_to tum_dbd0 PD\t0:[6;1H00\t2 (None)[7;12H3356521 accelerat interact tum_dbd0 R 2:29:[8;1H49\t2 hkn[0510,0517][9;12H3356520 accelerat interact tum_dbd0 R 2:30:[10;1H06\t2 hkn[0508-0509][58;58H",,terminal_output
|
65 |
+
64,116931,"TERMINAL",0,0,"[1;53H2\r[8d50[10d7[58;58H",,terminal_output
|
66 |
+
65,117973,"TERMINAL",0,0,"[1;53H3\r[8d51[10d8[58;58H",,terminal_output
|
67 |
+
66,119017,"TERMINAL",0,0,"[1;53H4\r[8d52[10d9[58;58H",,terminal_output
|
68 |
+
67,120052,"TERMINAL",0,0,"[1;53H5\r[8d53\r[10d10[58;58H",,terminal_output
|
69 |
+
68,121089,"TERMINAL",0,0,"[1;53H6\r[8d54[10d1[58;58H",,terminal_output
|
70 |
+
69,122170,"TERMINAL",0,0,"[1;53H7\r[8d55[10d2[58;58H",,terminal_output
|
71 |
+
70,123217,"TERMINAL",0,0,"[1;53H8\r[8d57[10d4[58;58H",,terminal_output
|
72 |
+
71,124252,"TERMINAL",0,0,"[1;52H30\r[8d58[10d5[58;58H",,terminal_output
|
73 |
+
72,125341,"TERMINAL",0,0,"[1;53H1\r[8d59[10d6[58;58H",,terminal_output
|
74 |
+
73,126428,"TERMINAL",0,0,"[1;53H2[7d2:30\r[8d00[10d7[58;58H",,terminal_output
|
75 |
+
74,127867,"TERMINAL",0,0,"[1;53H3\r[8d01[10d8[58;58H",,terminal_output
|
76 |
+
75,128698,"TERMINAL",0,0,"[1;53H4\r[8d02[10d9[58;58H",,terminal_output
|
77 |
+
76,129579,"TERMINAL",0,0,"[1;53H5\r[8d03\r[10d20[58;58H",,terminal_output
|
78 |
+
77,130711,"TERMINAL",0,0,"[1;53H6\r[8d04[10d1[58;58H",,terminal_output
|
79 |
+
78,131616,"TERMINAL",0,0,"[1;53H7\r[8d05[10d2[58;58H",,terminal_output
|
80 |
+
79,132701,"TERMINAL",0,0,"[1;53H8[5;48H R[6;11Hhkn[0628,0632]\r[8d06[10d3[58;58H",,terminal_output
|
81 |
+
80,133763,"TERMINAL",0,0,"[1;53H9\r[6d01[8d7[10d4[58;58H",,terminal_output
|
82 |
+
81,134771,"TERMINAL",0,0,"[1;52H40\r[6d02[8d8[10d5[58;58H",,terminal_output
|
83 |
+
82,135814,"TERMINAL",0,0,"[1;53H1\r[6d03[8d9[10d6[58;58H",,terminal_output
|
84 |
+
83,136587,"TERMINAL",0,0,"[58;1H[?1049l[23;0;0t\r[?1l>]0;tum_dbd0378@hkn1993:~/jafar",,terminal_output
|
85 |
+
84,137685,"TERMINAL",0,0,"idle",,terminal_command
|
86 |
+
85,137741,"TERMINAL",0,0,"]633;CPartition dev_cpuonly : 10 nodes idle\r\nPartition cpuonly : 36 nodes idle\r\nPartition dev_accelerated : 1 nodes idle\r\nPartition accelerated : 45 nodes idle\r\nPartition dev_accelerated-h100 : 0 nodes idle\r\nPartition accelerated-h100 : 0 nodes idle\r\nPartition large : 7 nodes idle\r\n]0;tum_dbd0378@hkn1993:~/jafar",,terminal_output
|
87 |
+
86,139507,"TERMINAL",0,0,"queue",,terminal_command
|
88 |
+
87,139545,"TERMINAL",0,0,"]633;C",,terminal_output
|
89 |
+
88,139622,"TERMINAL",0,0,"[?1049h[22;0;0t[1;58r(B[m[4l[?7h[H[2J[1;14Hhkn1993.localdomain: Fri Jul 18 12:20:45 2025[3;14HJOBID PARTITION NAME USER ST\tTI[4;1HME NODES NODELIST(REASON)[5;12H3357211 accelerat train_to tum_dbd0 R\t0:[6;1H07\t2 hkn[0628,0632][7;12H3356521 accelerat interact tum_dbd0 R 2:30:[8;1H13\t2 hkn[0510,0517][9;12H3356520 accelerat interact tum_dbd0 R 2:30:[10;1H30\t2 hkn[0508-0509][58;58H",,terminal_output
|
90 |
+
89,140702,"TERMINAL",0,0,"[1;53H6\r[6d08[8d4[10d1[58;58H",,terminal_output
|
91 |
+
90,141909,"TERMINAL",0,0,"[1;53H7\r[6d09[8d5[10d2[58;58H",,terminal_output
|
92 |
+
91,142926,"TERMINAL",0,0,"[1;53H8\r[6d10[8d6[10d3[58;58H",,terminal_output
|
93 |
+
92,143940,"TERMINAL",0,0,"[1;53H9\r[6d11[8d7[10d4[58;58H",,terminal_output
|
94 |
+
93,145624,"TERMINAL",0,0,"[1;52H50\r[6d12[8d8[10d5[58;58H",,terminal_output
|
95 |
+
94,146013,"TERMINAL",0,0,"[1;53H1\r[6d13[8d9[10d6[58;58H",,terminal_output
|
96 |
+
95,147707,"TERMINAL",0,0,"[1;53H2\r[6d14\r[8d20[10d7[58;58H",,terminal_output
|
97 |
+
96,148814,"TERMINAL",0,0,"[1;53H3\r[6d15[8d1[10d8[58;58H",,terminal_output
|
98 |
+
97,149134,"TERMINAL",0,0,"[1;53H4\r[6d16[8d2[10d9[58;58H",,terminal_output
|
99 |
+
98,150175,"TERMINAL",0,0,"[1;53H5\r[6d17[8d3\r[10d40[58;58H",,terminal_output
|
100 |
+
99,152050,"TERMINAL",0,0,"[1;53H6\r[6d18[8d4[10d1[58;58H",,terminal_output
|
101 |
+
100,153532,"TERMINAL",0,0,"[1;53H7\r[6d20[8d6[10d3[58;58H",,terminal_output
|
102 |
+
101,155748,"TERMINAL",0,0,"[1;53H9\r[6d21[8d7[10d4[58;58H",,terminal_output
|
103 |
+
102,159039,"TERMINAL",0,0,"[1;50H1:00\r[6d22[8d8[10d5[58;58H",,terminal_output
|
104 |
+
103,159057,"TERMINAL",0,0,"[1;53H1\r[6d23[8d9[10d6[58;58H",,terminal_output
|
105 |
+
104,159169,"TERMINAL",0,0,"[1;53H2\r[6d24\r[8d30[10d7[58;58H",,terminal_output
|
106 |
+
105,159509,"TERMINAL",0,0,"[1;53H3\r[6d25[8d1[10d8[58;58H[1;53H4\r[6d26[8d2[10d9[58;58H",,terminal_output
|
107 |
+
106,160016,"TERMINAL",0,0,"[1;53H5\r[6d27[8d3\r[10d50[58;58H",,terminal_output
|
108 |
+
107,161148,"TERMINAL",0,0,"[1;53H6\r[6d28[8d4[10d1[58;58H",,terminal_output
|
109 |
+
108,162045,"TERMINAL",0,0,"[1;53H7\r[6d29[8d5[10d2[58;58H",,terminal_output
|
110 |
+
109,163298,"TERMINAL",0,0,"[1;53H8\r[6d30[8d6[10d3[58;58H",,terminal_output
|
111 |
+
110,164188,"TERMINAL",0,0,"[1;53H9\r[6d31[8d7[10d4[58;58H",,terminal_output
|
112 |
+
111,165207,"TERMINAL",0,0,"[1;52H10\r[6d32[8d8[10d5[58;58H",,terminal_output
|
113 |
+
112,201431,"TERMINAL",0,0,"[1;53H1\r[6d33[8d9[10d6[58;58H",,terminal_output
|
114 |
+
113,201816,"TERMINAL",0,0,"[1;53H2\r[6d34\r[8d40[10d7[58;58H",,terminal_output
|
115 |
+
114,202136,"TERMINAL",0,0,"[1;53H3\r[6d35[8d1[10d8[58;58H",,terminal_output
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-41b294b4-b89c-4c1d-8a02-14afc9168dc41753085667665-2025_07_21-10.15.04.628/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-4457e5d2-f5e8-4b15-95aa-bafa247369991751528947759-2025_07_03-09.50.10.663/source.csv
ADDED
@@ -0,0 +1,45 @@
1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
2 |
+
1,7,"utils/dataloader.py",0,0,"import jax\nimport numpy as np\nimport grain\nfrom typing import Any, Optional\nfrom array_record.python.array_record_module import ArrayRecordWriter\nimport tensorflow as tf\nimport os\nfrom pathlib import Path\nimport pickle\nimport multiprocessing as mp\nfrom functools import partial\nimport logging\n\ngrain.config.update(""py_debug_mode"", True)\n\n# Configure logging to see debug output\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\ndef _convert_single_tfrecord(\n tfrecord_file: Path,\n output_folder: str,\n feature_description: dict,\n) -> str:\n """"""\n Convert a single TFRecord file to ArrayRecord format.\n \n Args:\n tfrecord_file: Path to the TFRecord file\n output_folder: Output folder for the ArrayRecord file\n feature_description: Dictionary describing TFRecord features\n \n Returns:\n Path to the created ArrayRecord file\n """"""\n output_filename = tfrecord_file.stem + "".array_record""\n output_file = os.path.join(output_folder, output_filename)\n \n dataset = tf.data.TFRecordDataset(str(tfrecord_file))\n \n def parse_tfrecord(example_proto):\n """"""Parse a single TFRecord example.""""""\n parsed_features = tf.io.parse_single_example(example_proto, feature_description)\n raw_video_bytes = parsed_features['raw_video'].numpy()\n sequence_length = int(parsed_features['sequence_length'].numpy())\n \n return {\n 'raw_video': raw_video_bytes,\n 'sequence_length': sequence_length,\n }\n \n record_count = 0\n writer = ArrayRecordWriter(output_file, ""group_size:1"")\n for record in dataset:\n parsed_record = parse_tfrecord(record)\n writer.write(pickle.dumps(parsed_record))\n record_count += 1\n writer.close()\n \n print(f""Converted {tfrecord_file.name} -> {output_filename}: {record_count} records"")\n return output_file\n\n\ndef convert_tfrecords_to_arrayrecords(\n tfrecord_folder: str,\n output_folder: str,\n feature_description: Optional[dict] = None,\n num_workers: Optional[int] = None,\n):\n """"""\n Converts TFRecord files to ArrayRecord format for use with Grain.\n Creates one ArrayRecord file per TFRecord file using multiprocessing.\n \n Args:\n tfrecord_folder: Path to folder containing TFRecord files\n output_folder: Path to output folder for ArrayRecord files\n feature_description: Dictionary describing TFRecord features. If None,\n uses default description for video data.\n num_workers: Number of worker processes. If None, uses CPU count.\n \n Returns:\n List of paths to created ArrayRecord files\n """"""\n if feature_description is None:\n feature_description = {\n 'raw_video': tf.io.FixedLenFeature([], tf.string),\n 'sequence_length': tf.io.FixedLenFeature([], tf.int64),\n }\n \n os.makedirs(output_folder, exist_ok=True)\n \n tfrecord_files = list(Path(tfrecord_folder).glob(""*.tfrecord""))\n if not tfrecord_files:\n raise ValueError(f""No TFRecord files found in {tfrecord_folder}"")\n \n print(f""Found {len(tfrecord_files)} TFRecord files"")\n \n if num_workers is None:\n num_workers = min(mp.cpu_count(), len(tfrecord_files))\n \n print(f""Using {num_workers} worker processes for conversion"")\n \n convert_func = partial(\n _convert_single_tfrecord,\n output_folder=output_folder,\n feature_description=feature_description\n )\n \n with mp.Pool(processes=num_workers) as pool:\n arrayrecord_files = pool.map(convert_func, tfrecord_files)\n \n print(f""Conversion complete! 
Created {len(arrayrecord_files)} ArrayRecord files"")\n return arrayrecord_files\n\n\nclass ProcessEpisodeAndSlice(grain.transforms.RandomMap):\n """"""\n A Grain Transformation that combines parsing, slicing, and normalizing.\n """"""\n\n def __init__(self, seq_len: int, image_h: int, image_w: int, image_c: int):\n """"""Initializes the transformation with processing parameters.""""""\n self.seq_len = seq_len\n self.image_h = image_h\n self.image_w = image_w\n self.image_c = image_c\n\n def random_map(self, element: dict, rng: np.random.Generator) -> Any:\n """"""\n Processes a single raw episode from the data source.\n\n Args:\n element: A dictionary representing one record from the DataSource.\n Expected to contain 'raw_video' (bytes) and 'sequence_length' (int)\n rng: A per-record random number generator provided by the Grain sampler.\n\n Returns:\n A processed video sequence as a NumPy array with shape\n (seq_len, height, width, channels) and dtype float32.\n """"""\n assert isinstance(element, bytes)\n element = pickle.loads(element)\n \n video_shape = (\n element[""sequence_length""],\n self.image_h,\n self.image_w,\n self.image_c,\n )\n episode_tensor = np.frombuffer(element[""raw_video""], dtype=np.uint8)\n episode_tensor = episode_tensor.reshape(video_shape)\n\n current_episode_len = episode_tensor.shape[0]\n if current_episode_len < self.seq_len:\n raise ValueError(f""An episode has length {current_episode_len}, which is ""\n f""shorter than the requested sequence length {self.seq_len}."")\n \n max_start_idx = current_episode_len - self.seq_len\n \n start_idx = rng.integers(0, max_start_idx + 1)\n\n seq = episode_tensor[start_idx : start_idx + self.seq_len]\n\n processed_sequence = seq.astype(np.float32) / 255.0\n\n return processed_sequence\n\n\ndef get_dataloader(\n array_record_paths: list[str],\n seq_len: int,\n global_batch_size: int,\n image_h: int,\n image_w: int,\n image_c: int,\n num_workers: int = 1,\n prefetch_buffer_size: int = 1,\n seed: int = 42,\n):\n """"""\n Creates a data loading pipeline using Grain.\n """"""\n if not array_record_paths:\n raise ValueError(""array_record_paths list cannot be empty."")\n\n num_processes = jax.process_count()\n\n if global_batch_size % num_processes != 0:\n raise ValueError(\n f""Global batch size {global_batch_size} must be divisible by ""\n f""the number of JAX processes {num_processes} for proper sharding.""\n )\n per_process_batch_size = global_batch_size // num_processes\n\n source = grain.sources.ArrayRecordDataSource(array_record_paths)\n \n sampler = grain.samplers.IndexSampler(\n num_records=len(source),\n shard_options=grain.sharding.ShardByJaxProcess(drop_remainder=True),\n # FIXME: check whether the global shuffle is the reason why the dataloader is so slow\n shuffle=False,\n num_epochs=100, # FIXME: is there an equivalent to tf.data.repeat(None)?\n seed=seed,\n )\n\n operations = [\n ProcessEpisodeAndSlice(\n seq_len=seq_len, image_h=image_h, image_w=image_w, image_c=image_c\n ),\n grain.transforms.Batch(batch_size=per_process_batch_size, drop_remainder=True),\n ]\n\n read_options = grain.ReadOptions(\n prefetch_buffer_size=prefetch_buffer_size,\n # FIXME: `If the data is already loaded in memory, we recommend setting this to 0 to\n # avoid Python GIL contention by multiple threads.`\n num_threads=1,\n )\n dataloader = grain.DataLoader(\n data_source=source,\n sampler=sampler,\n operations=operations,\n worker_count=num_workers,\n # FIXME: think about whether we should tune this\n worker_buffer_size=1,\n 
read_options=read_options,\n )\n\n return iter(dataloader)\n\n",python,tab
|
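A minimal usage sketch for the get_dataloader pipeline defined in utils/dataloader.py above; the data directory is hypothetical, and the batch size and frame shape mirror values that appear elsewhere in this recording.

import os
from utils.dataloader import get_dataloader  # module path as recorded above

data_dir = "/path/to/array_records"  # hypothetical location
files = [os.path.join(data_dir, f) for f in os.listdir(data_dir) if f.endswith(".array_record")]

loader = get_dataloader(
    files,
    seq_len=16,
    global_batch_size=96,  # must be divisible by jax.process_count()
    image_h=90,
    image_w=160,
    image_c=3,
    num_workers=8,
    prefetch_buffer_size=1,
    seed=42,
)
batch = next(loader)  # shape (per_process_batch, 16, 90, 160, 3), float32 scaled to [0, 1]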
3 |
+
2,440,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"9:50:10 AM [info] Activating crowd-code\n9:50:10 AM [info] Recording started\n9:50:10 AM [info] Initializing git provider using file system watchers...\n9:50:10 AM [info] Git repository found\n9:50:10 AM [info] Git provider initialized successfully\n9:50:10 AM [info] Initial git state: [object Object]\n",Log,tab
|
4 |
+
3,9246,"utils/dataloader.py",0,0,"",python,tab
|
5 |
+
4,27286,"TERMINAL",0,0,"",,terminal_command
|
6 |
+
5,27298,"TERMINAL",0,0,"[H[2J]633;A(jafar) ]633;B\r\n[?2004l\r]633;E;;]633;C]0;tum_dbd0378@hkn1993:~/jafar]633;D",,terminal_output
|
7 |
+
6,32834,"TERMINAL",0,0,"cd /home/hk-project-p0023960/tum_dbd0378/jafar && python calculate_avg_frames.py",,terminal_command
|
8 |
+
7,32888,"TERMINAL",0,0,"]633;E;2025-07-03 09:50:43 cd /home/hk-project-p0023960/tum_dbd0378/jafar && python calculate_avg_frames.py;]633;C",,terminal_output
|
9 |
+
8,33000,"TERMINAL",0,0,"Reading ArrayRecord file: /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/dummy_arrayrecords_600_shards/videos_0010.array_record\r\nError: 'array_record.python.array_record_module.ArrayRecordReader' object is not iterable\r\n]0;tum_dbd0378@hkn1993:~/jafar]633;D;1]633;P;Cwd=/home/hk-project-p0023960/tum_dbd0378/jafar[?2004h",,terminal_output
|
10 |
+
9,44132,"calculate_avg_frames.py",0,0,"#!/usr/bin/env python3\n""""""\nScript to calculate the average number of frames per video in an ArrayRecord file.\n""""""\n\nfrom array_record.python.array_record_module import ArrayRecordReader\nimport pickle\nimport sys\n\ndef calculate_avg_frames(array_record_path):\n """"""\n Calculate the average number of frames per video in an ArrayRecord file.\n \n Args:\n array_record_path: Path to the ArrayRecord file\n \n Returns:\n Average number of frames per video\n """"""\n total_frames = 0\n num_videos = 0\n \n print(f""Reading ArrayRecord file: {array_record_path}"")\n \n reader = ArrayRecordReader(array_record_path)\n for record in reader:\n data = pickle.loads(record)\n total_frames += data[""sequence_length""]\n num_videos += 1\n \n # Print progress every 1000 videos\n if num_videos % 1000 == 0:\n print(f""Processed {num_videos} videos..."")\n \n reader.close()\n \n if num_videos > 0:\n avg_frames = total_frames / num_videos\n print(f""\nResults:"")\n print(f""Total videos: {num_videos}"")\n print(f""Total frames: {total_frames}"")\n print(f""Average frames per video: {avg_frames:.2f}"")\n return avg_frames\n else:\n print(""No videos found in the ArrayRecord file."")\n return 0\n\nif __name__ == ""__main__"":\n # Default path to the ArrayRecord file\n array_record_path = ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/dummy_arrayrecords_600_shards/videos_0010.array_record""\n \n # Allow command line argument to override the path\n if len(sys.argv) > 1:\n array_record_path = sys.argv[1]\n \n try:\n calculate_avg_frames(array_record_path)\n except Exception as e:\n print(f""Error: {e}"")\n sys.exit(1) ",python,tab
|
11 |
+
10,45692,"calculate_avg_frames.py",142,0,"",python,selection_mouse
|
12 |
+
11,47314,"calculate_avg_frames.py",160,0,"",python,selection_command
|
13 |
+
12,47505,"calculate_avg_frames.py",167,0,"",python,selection_command
|
14 |
+
13,48186,"calculate_avg_frames.py",608,0,"",python,selection_command
|
15 |
+
14,75883,"calculate_avg_frames.py",0,1790," ",python,content
|
16 |
+
15,76043,"calculate_avg_frames.py",0,0,"#!/usr/bin/env python3\n""""""\nScript to calculate the average number of frames per video in an ArrayRecord file.\n""""""\n\nimport grain\nimport pickle\nimport sys\n\ndef calculate_avg_frames(array_record_path):\n """"""\n Calculate the average number of frames per video in an ArrayRecord file.\n \n Args:\n array_record_path: Path to the ArrayRecord file\n \n Returns:\n Average number of frames per video\n """"""\n total_frames = 0\n num_videos = 0\n \n print(f""Reading ArrayRecord file: {array_record_path}"")\n \n # Use Grain's ArrayRecordDataSource to read the file\n data_source = grain.sources.ArrayRecordDataSource([array_record_path])\n num_records = len(data_source)\n \n print(f""Found {num_records} records in the file"")\n \n for i in range(num_records):\n record_bytes = data_source[i]\n data = pickle.loads(record_bytes)\n total_frames += data[""sequence_length""]\n num_videos += 1\n \n # Print progress every 1000 videos\n if num_videos % 1000 == 0:\n print(f""Processed {num_videos}/{num_records} videos..."")\n \n if num_videos > 0:\n avg_frames = total_frames / num_videos\n print(f""\nResults:"")\n print(f""Total videos: {num_videos}"")\n print(f""Total frames: {total_frames}"")\n print(f""Average frames per video: {avg_frames:.2f}"")\n return avg_frames\n else:\n print(""No videos found in the ArrayRecord file."")\n return 0\n\nif __name__ == ""__main__"":\n # Default path to the ArrayRecord file\n array_record_path = ""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/dummy_arrayrecords_600_shards/videos_0010.array_record""\n \n # Allow command line argument to override the path\n if len(sys.argv) > 1:\n array_record_path = sys.argv[1]\n \n try:\n calculate_avg_frames(array_record_path)\n except Exception as e:\n print(f""Error: {e}"")\n sys.exit(1) \n",python,content
|
17 |
+
16,76046,"calculate_avg_frames.py",1955,2,"",python,content
|
18 |
+
17,83394,"TERMINAL",0,0,"python calculate_avg_frames.py",,terminal_command
|
19 |
+
18,83449,"TERMINAL",0,0,"]633;E;2025-07-03 09:51:33 python calculate_avg_frames.py;]633;C",,terminal_output
|
20 |
+
19,95104,"TERMINAL",0,0,"Reading ArrayRecord file: /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/dummy_arrayrecords_600_shards/videos_0010.array_record\r\nFound 100 records in the file\r\n\r\nResults:\r\nTotal videos: 100\r\nTotal frames: 1600\r\nAverage frames per video: 16.00\r\n",,terminal_output
|
21 |
+
20,95266,"TERMINAL",0,0,"]0;tum_dbd0378@hkn1993:~/jafar]633;D;0]633;P;Cwd=/home/hk-project-p0023960/tum_dbd0378/jafar",,terminal_output
|
22 |
+
21,158929,"TERMINAL",0,0,"/knoms_arrayrecords_500_shards/shard-00100-of-00500.array_record",,terminal_command
|
23 |
+
22,159080,"TERMINAL",0,0,"]633;E;2025-07-03 09:52:47 python calculate_avg_frames.py /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/knoms_arrayrecords_500_shards/shard-00100-of-00500.array_record;]633;C",,terminal_output
|
24 |
+
23,159326,"TERMINAL",0,0,"Reading ArrayRecord file: /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/knoms_arrayrecords_500_shards/shard-00100-of-00500.array_record\r\nFound 2 records in the file\r\nERROR:absl:File /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/knoms_arrayrecords_500_shards/shard-00100-of-00500.array_record was created with group size 65536. Grain requires group size 1 for good performance. Please re-generate your ArrayRecord files with 'group_size:1'.\r\n",,terminal_output
|
25 |
+
24,165149,"TERMINAL",0,0,"\r\nResults:\r\nTotal videos: 2\r\nTotal frames: 21361\r\nAverage frames per video: 10680.50\r\n",,terminal_output
|
26 |
+
25,169642,"TERMINAL",0,0,"python calculate_avg_frames.py /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/knoms_arrayrecords_500_shards/shard-00100-of-00500.array_record",,terminal_focus
|
27 |
+
26,176593,"calculate_avg_frames.py",419,0,"",python,selection_mouse
|
28 |
+
27,176597,"calculate_avg_frames.py",418,0,"",python,selection_command
|
29 |
+
28,178545,"utils/dataloader.py",0,0,"",python,tab
|
30 |
+
29,181793,"generate_arrayrecord_dataset.py",0,0,"#!/usr/bin/env python3\n""""""\nGenerate ArrayRecord dataset compatible with train_tokenizer.py\n\nThis script creates synthetic video data and saves it in ArrayRecord format\nthat can be used by the tokenizer training script.\n""""""\n\nimport os\nimport pickle\nimport numpy as np\nimport grain\nfrom array_record.python.array_record_module import ArrayRecordWriter\nimport argparse\nimport multiprocessing as mp\nfrom functools import partial\nimport time\n\n\ndef generate_synthetic_video(\n seq_len: int = 16,\n height: int = 90,\n width: int = 160,\n channels: int = 3\n) -> np.ndarray:\n """"""\n Generate synthetic video data with random frames for training.\n \n Args:\n seq_len: Number of frames in the video sequence\n height: Height of each frame\n width: Width of each frame\n channels: Number of color channels\n \n Returns:\n Video array of shape (seq_len, height, width, channels)\n """"""\n video = np.random.rand(seq_len, height, width, channels).astype(np.float32)\n \n return video\n\n\ndef create_single_arrayrecord_file(\n file_info: tuple,\n output_dir: str,\n seq_len: int,\n height: int,\n width: int,\n channels: int,\n records_per_file: int,\n seed: int\n) -> tuple:\n """"""\n Create a single ArrayRecord file with synthetic video data.\n \n Args:\n file_info: Tuple of (file_idx, start_idx, end_idx)\n output_dir: Directory to save ArrayRecord files\n seq_len: Number of frames per video\n height: Frame height\n width: Frame width\n channels: Number of color channels\n records_per_file: Number of records per ArrayRecord file\n seed: Random seed for reproducibility\n \n Returns:\n Tuple of (file_path, num_videos_created, success)\n """"""\n file_idx, start_idx, end_idx = file_info\n videos_in_file = end_idx - start_idx\n \n # Set seed for this process (add file_idx to make each file different)\n np.random.seed(seed + file_idx)\n \n file_path = os.path.join(output_dir, f""videos_{file_idx:04d}.array_record"")\n \n try:\n writer = ArrayRecordWriter(file_path, ""group_size:1"")\n \n for video_idx in range(videos_in_file):\n video = generate_synthetic_video(seq_len, height, width, channels)\n \n # Convert to uint8 format as expected by the dataloader\n video_uint8 = (video * 255).astype(np.uint8)\n \n # Create record in the format expected by ProcessEpisodeAndSlice\n record = {\n ""raw_video"": video_uint8.tobytes(),\n ""sequence_length"": seq_len\n }\n \n writer.write(pickle.dumps(record))\n \n writer.close()\n return (file_path, videos_in_file, True)\n \n except Exception as e:\n print(f""Error creating file {file_path}: {e}"")\n return (file_path, 0, False)\n\n\ndef create_arrayrecord_dataset(\n output_dir: str,\n num_videos: int = 1000,\n seq_len: int = 16,\n height: int = 90,\n width: int = 160,\n channels: int = 3,\n records_per_file: int = 100,\n seed: int = 42,\n num_processes: int | None = None\n):\n """"""\n Create ArrayRecord dataset with synthetic video data using multiprocessing.\n \n Args:\n output_dir: Directory to save ArrayRecord files\n num_videos: Total number of videos to generate\n seq_len: Number of frames per video\n height: Frame height\n width: Frame width\n channels: Number of color channels\n records_per_file: Number of records per ArrayRecord file\n seed: Random seed for reproducibility\n num_processes: Number of processes to use (None for auto-detect)\n """"""\n os.makedirs(output_dir, exist_ok=True)\n \n num_files = (num_videos + records_per_file - 1) // records_per_file\n \n print(f""Generating {num_videos} 
videos across {num_files} ArrayRecord files..."")\n print(f""Each file will contain up to {records_per_file} videos"")\n print(f""Video dimensions: {seq_len} frames × {height}×{width}×{channels}"")\n \n # Prepare file information for each worker\n file_infos = []\n for file_idx in range(num_files):\n start_idx = file_idx * records_per_file\n end_idx = min((file_idx + 1) * records_per_file, num_videos)\n file_infos.append((file_idx, start_idx, end_idx))\n \n # Set number of processes (use CPU count if not specified)\n if num_processes is None:\n num_processes = min(mp.cpu_count(), num_files)\n \n print(f""Using {num_processes} processes for parallel generation..."")\n \n start_time = time.time()\n \n # Create partial function with fixed arguments\n worker_func = partial(\n create_single_arrayrecord_file,\n output_dir=output_dir,\n seq_len=seq_len,\n height=height,\n width=width,\n channels=channels,\n records_per_file=records_per_file,\n seed=seed\n )\n \n # Use multiprocessing to create files in parallel\n with mp.Pool(processes=num_processes) as pool:\n results = pool.map(worker_func, file_infos)\n \n end_time = time.time()\n \n # Process results\n total_records = 0\n successful_files = 0\n \n for file_path, num_videos_created, success in results:\n if success:\n print(f""✓ Created {file_path} with {num_videos_created} videos"")\n total_records += num_videos_created\n successful_files += 1\n else:\n print(f""✗ Failed to create {file_path}"")\n \n print(f""\nDataset generation complete!"")\n print(f""Total videos generated: {total_records}"")\n print(f""Successful files: {successful_files}/{num_files}"")\n print(f""Files created in: {output_dir}"")\n print(f""Generation time: {end_time - start_time:.2f} seconds"")\n\n\ndef verify_dataset(output_dir: str, num_samples: int = 5):\n """"""\n Verify the generated dataset using Grain's ArrayRecordDataSource.\n \n Args:\n output_dir: Directory containing ArrayRecord files\n num_samples: Number of samples to verify\n """"""\n print(f""\nVerifying dataset in {output_dir}..."")\n \n # Find all ArrayRecord files\n array_record_files = [\n os.path.join(output_dir, f) \n for f in os.listdir(output_dir) \n if f.endswith('.array_record')\n ]\n \n if not array_record_files:\n print(""No ArrayRecord files found!"")\n return\n \n print(f""Found {len(array_record_files)} ArrayRecord files"")\n \n # Use Grain's ArrayRecordDataSource as shown in the documentation\n try:\n data_source = grain.sources.ArrayRecordDataSource(array_record_files[0])\n print(f""Number of records in first file: {len(data_source)}"")\n \n # Load and verify a few samples\n for i in range(min(num_samples, len(data_source))):\n record_bytes = data_source[i]\n record = pickle.loads(record_bytes)\n \n # Reconstruct video from raw_video bytes\n video_shape = (record[""sequence_length""], 90, 160, 3) # Hardcoded for now\n video = np.frombuffer(record[""raw_video""], dtype=np.uint8).reshape(video_shape)\n \n print(f"" Record {i}: video shape = {video.shape}, dtype = {video.dtype}"")\n print(f"" Value range: [{video.min()}, {video.max()}]"")\n print(f"" Mean: {video.mean():.1f}"")\n print(f"" Sequence length: {record['sequence_length']}"")\n \n except Exception as e:\n print(f""Error reading ArrayRecord file: {e}"")\n print(""This might indicate a file format issue."")\n\n\ndef main():\n parser = argparse.ArgumentParser(description=""Generate ArrayRecord dataset for tokenizer training"")\n parser.add_argument(""--output_dir"", type=str, default=""data_arrayrecord/dummy"",\n help=""Output 
directory for ArrayRecord files"")\n parser.add_argument(""--num_videos"", type=int, default=1000,\n help=""Total number of videos to generate"")\n parser.add_argument(""--seq_len"", type=int, default=16,\n help=""Number of frames per video"")\n parser.add_argument(""--height"", type=int, default=90,\n help=""Frame height"")\n parser.add_argument(""--width"", type=int, default=160,\n help=""Frame width"")\n parser.add_argument(""--channels"", type=int, default=3,\n help=""Number of color channels"")\n parser.add_argument(""--records_per_file"", type=int, default=100,\n help=""Number of records per ArrayRecord file"")\n parser.add_argument(""--seed"", type=int, default=42,\n help=""Random seed for reproducibility"")\n parser.add_argument(""--num_processes"", type=int, default=None,\n help=""Number of processes to use (default: auto-detect)"")\n parser.add_argument(""--verify"", action=""store_true"",\n help=""Verify the generated dataset"")\n \n args = parser.parse_args()\n \n # Generate the dataset\n create_arrayrecord_dataset(\n output_dir=args.output_dir,\n num_videos=args.num_videos,\n seq_len=args.seq_len,\n height=args.height,\n width=args.width,\n channels=args.channels,\n records_per_file=args.records_per_file,\n seed=args.seed,\n num_processes=args.num_processes\n )\n \n # Verify if requested\n if args.verify:\n verify_dataset(args.output_dir)\n \nif __name__ == ""__main__"":\n main()\n",python,tab
|
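A small round-trip sketch of the record format produced by generate_arrayrecord_dataset.py above: one pickled dict per row, written with group_size:1 and read back through Grain. The output path and video shape are illustrative assumptions.

import pickle
import numpy as np
import grain
from array_record.python.array_record_module import ArrayRecordWriter

video = (np.random.rand(16, 90, 160, 3) * 255).astype(np.uint8)  # hypothetical clip
record = {"raw_video": video.tobytes(), "sequence_length": 16}

writer = ArrayRecordWriter("/tmp/demo.array_record", "group_size:1")
writer.write(pickle.dumps(record))
writer.close()

source = grain.sources.ArrayRecordDataSource(["/tmp/demo.array_record"])
restored = pickle.loads(source[0])
frames = np.frombuffer(restored["raw_video"], dtype=np.uint8).reshape(16, 90, 160, 3)
print(frames.shape, restored["sequence_length"])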
31 |
+
30,183371,"TERMINAL",0,0,"bash",,terminal_focus
|
32 |
+
31,217610,"generate_arrayrecord_dataset.py",9009,0,"",python,selection_command
|
33 |
+
32,217614,"generate_arrayrecord_dataset.py",8127,0,"",python,selection_command
|
34 |
+
33,217620,"generate_arrayrecord_dataset.py",8069,0,"",python,selection_command
|
35 |
+
34,217625,"generate_arrayrecord_dataset.py",8088,0,"",python,selection_command
|
36 |
+
35,217628,"generate_arrayrecord_dataset.py",8092,0,"",python,selection_command
|
37 |
+
36,217633,"generate_arrayrecord_dataset.py",8094,0,"",python,selection_command
|
38 |
+
37,217641,"generate_arrayrecord_dataset.py",8101,0,"",python,selection_command
|
39 |
+
38,217644,"generate_arrayrecord_dataset.py",8104,0,"",python,selection_command
|
40 |
+
39,217647,"generate_arrayrecord_dataset.py",8101,0,"",python,selection_command
|
41 |
+
40,217649,"generate_arrayrecord_dataset.py",8094,0,"",python,selection_command
|
42 |
+
41,247808,"TERMINAL",0,0,"salloc",,terminal_focus
|
43 |
+
42,268574,"TERMINAL",0,0,"bash",,terminal_focus
|
44 |
+
43,269333,"TERMINAL",0,0,"salloc",,terminal_focus
|
45 |
+
44,270973,"TERMINAL",0,0,"bash",,terminal_focus
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-4624dad2-2cdd-4760-a1d4-90d8cf5390e11752824923646-2025_07_18-09.49.17.759/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-46ca2edf-d936-40bf-b5cb-2bace160abae1751617999883-2025_07_04-16.08.06.165/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-4719c5f9-1b15-4792-8afd-690761108bda1751617825355-2025_07_04-10.31.22.581/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-48fbb415-6db9-4d35-b548-561e828791bf1751383187013-2025_07_01-17.19.57.60/source.csv
ADDED
@@ -0,0 +1,4 @@
1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
2 |
+
1,2,"utils/dataloader.py",0,0,"import functools\nimport jax\n\nimport tensorflow as tf\n\n# reserve GPU memory for JAX only if tensorflow is built with GPU support\ntf.config.experimental.set_visible_devices([], ""GPU"")\n\n\n# --- TensorFlow function for processing: slicing, normalization ---\ndef _tf_process_episode(episode_tensor, seq_len, image_h, image_w, image_c):\n """"""\n Processes a raw episode tensor in TensorFlow.\n Takes a full episode, extracts a random sequence, and normalizes it.\n Args:\n episode_tensor: A TensorFlow tensor representing a full video episode.\n Expected shape: (dynamic_length, image_h, image_w, image_c)\n Expected dtype: e.g., tf.uint8 (raw pixel values)\n seq_len: The desired length of the sub-sequence to extract.\n image_h: The height of each frame.\n image_w: The width of each frame.\n image_c: The number of channels in each frame.\n Returns:\n A TensorFlow tensor representing the processed video sequence.\n Shape: (seq_len, image_h, image_w, image_c)\n Dtype: tf.float32 (normalized pixel values)\n """"""\n current_episode_len = tf.shape(episode_tensor)[0]\n\n max_start_idx = current_episode_len - seq_len\n\n start_idx = tf.random.uniform(\n shape=(), minval=0, maxval=max_start_idx + 1, dtype=tf.int32\n )\n\n seq = episode_tensor[start_idx : start_idx + seq_len]\n\n seq = tf.cast(seq, tf.float32) / 255.0\n\n # Ensure the final shape is statically known for batching.\n # tf.reshape is robust, but tf.ensure_shape or set_shape can also be used if confident.\n processed_sequence = tf.reshape(seq, [seq_len, image_h, image_w, image_c])\n\n return processed_sequence\n\n\ndef _parse_tfrecord_fn(example_proto, image_h, image_w, image_c):\n feature_description = {\n ""height"": tf.io.FixedLenFeature([], tf.int64),\n ""width"": tf.io.FixedLenFeature([], tf.int64),\n ""channels"": tf.io.FixedLenFeature([], tf.int64),\n ""sequence_length"": tf.io.FixedLenFeature([], tf.int64),\n ""raw_video"": tf.io.FixedLenFeature([], tf.string),\n }\n example = tf.io.parse_single_example(example_proto, feature_description)\n\n video_shape = (example[""sequence_length""], image_h, image_w, image_c)\n\n episode_tensor = tf.io.decode_raw(example[""raw_video""], out_type=tf.uint8)\n episode_tensor = tf.reshape(episode_tensor, video_shape)\n\n episode_tensor = tf.ensure_shape(episode_tensor, [None, image_h, image_w, image_c])\n return episode_tensor\n\n\ndef get_dataloader(\n tfrecord_paths: list[str], # List of TFRecord file paths\n seq_len: int,\n global_batch_size: int,\n image_h: int,\n image_w: int,\n image_c: int,\n shuffle_buffer_size: int = 1000,\n num_parallel_calls: int = tf.data.AUTOTUNE,\n seed: int = 42,\n):\n """"""\n Creates a tf.data.Dataset pipeline from TFRecord files.\n """"""\n if not tfrecord_paths:\n raise ValueError(""tfrecord_paths list cannot be empty."")\n\n process_id = jax.process_index()\n num_processes = jax.process_count()\n\n assert (\n global_batch_size % num_processes == 0\n ), ""Global batch size {global_batch_size} \\n must be divisible by the number of JAX processes {num_processes} for proper sharding.""\n per_process_batch_size = global_batch_size // num_processes\n\n dataset = tf.data.TFRecordDataset(\n tfrecord_paths, num_parallel_reads=tf.data.AUTOTUNE\n )\n\n dataset = dataset.shard(num_shards=num_processes, index=process_id)\n\n # (f.srambical) NOTE: For TFRecords, it's often good to have a large shuffle buffer.\n if shuffle_buffer_size > 0:\n dataset = dataset.shuffle(\n buffer_size=shuffle_buffer_size, seed=seed, 
reshuffle_each_iteration=True\n )\n parse_fn = functools.partial(\n _parse_tfrecord_fn, image_h=image_h, image_w=image_w, image_c=image_c\n )\n dataset = dataset.map(parse_fn, num_parallel_calls=num_parallel_calls)\n\n tf_process_fn = functools.partial(\n _tf_process_episode,\n seq_len=seq_len,\n image_h=image_h,\n image_w=image_w,\n image_c=image_c,\n )\n dataset = dataset.map(tf_process_fn, num_parallel_calls=num_parallel_calls)\n\n dataset = dataset.repeat(None)\n dataset = dataset.batch(per_process_batch_size, drop_remainder=True)\n dataset = dataset.prefetch(tf.data.AUTOTUNE)\n\n return dataset.as_numpy_iterator()\n",python,tab
|
3 |
+
2,121,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
4 |
+
3,886,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"5:19:57 PM [info] Activating crowd-code\n5:19:57 PM [info] Recording started\n5:19:57 PM [info] Initializing git provider using file system watchers...\n5:19:57 PM [info] Git repository found\n5:19:57 PM [info] Git provider initialized successfully\n5:19:57 PM [info] Initial git state: [object Object]\n",Log,content
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-4925dddc-46dd-4a2d-8d37-761ea748b28d1753197051515-2025_07_22-17.10.58.842/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-4c99cdc3-e81e-4e53-b728-37b9467466201751794872067-2025_07_06-11.41.50.869/source.csv
ADDED
@@ -0,0 +1,63 @@
1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
2 |
+
1,4,"experiments/tokenizer_dummy_distributed_checkpointing_test.sh",0,0,"#!/usr/bin/env bash\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\n\ndata_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/dummy'\n\nsrun python train_tokenizer.py \\n --batch_size 192 \\n --tmp_ckpt_dir $PWD/checkpoints/tokenizer_dummy_distributed_checkpointing_test \\n # --tmp_ckpt_dir /mnt/odfs/$SLURM_JOB_ID/stripe_default/tokenizer_dummy_distributed_checkpointing_test_tmp \\n #--ckpt_dir $PWD/checkpoints/tokenizer_dummy_distributed_checkpointing_test \\n --log_checkpoint_interval 5 \\n --num_steps 300000 \\n --warmup_steps 10000 \\n --seed 0 \\n --min_lr=0.0000866 \\n --max_lr=0.0000866 \\n --data_dir $data_dir",shellscript,tab
|
3 |
+
2,770,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"11:41:50 AM [info] Activating crowd-code\n11:41:50 AM [info] Recording started\n11:41:50 AM [info] Initializing git provider using file system watchers...\n",Log,tab
|
4 |
+
3,888,"extension-output-pdoom-org.crowd-code-#1-crowd-code",153,0,"11:41:51 AM [info] Git repository found\n11:41:51 AM [info] Git provider initialized successfully\n11:41:51 AM [info] Initial git state: [object Object]\n",Log,content
|
5 |
+
4,13921,"experiments/tokenizer_dummy_distributed_checkpointing_test.sh",0,0,"",shellscript,tab
|
6 |
+
5,30125,"TERMINAL",0,0,"",,terminal_focus
|
7 |
+
6,32545,"TERMINAL",0,0,"queue",,terminal_command
|
8 |
+
7,32594,"TERMINAL",0,0,"]633;E;2025-07-06 11:42:23 queue;df91212e-efcd-486e-955f-f6a267c32b1a]633;C",,terminal_output
|
9 |
+
8,32683,"TERMINAL",0,0,"[?1049h[22;0;0t[1;58r(B[m[4l[?7h[H[2JEvery 1.0s: squeue --me[1;48Hhkn1990.localdomain: Sun Jul 6 11:42:23 2025[3;14HJOBID PARTITION NAME USER ST\tTIME NODES NODELIST(REASON)[4;12H3321151 accelerat interact tum_dbd0 R[56G12:41\t 1 hkn0710[58;92H",,terminal_output
|
10 |
+
9,33707,"TERMINAL",0,0,"[1;87H4[4;60H2[58;92H",,terminal_output
|
11 |
+
10,34745,"TERMINAL",0,0,"[1;87H5[4;60H3[58;92H",,terminal_output
|
12 |
+
11,35825,"TERMINAL",0,0,"[1;87H6[4;60H4[58;92H",,terminal_output
|
13 |
+
12,36836,"TERMINAL",0,0,"[1;87H7[4;60H5[58;92H",,terminal_output
|
14 |
+
13,37264,"TERMINAL",0,0,"[58;1H[?1049l[23;0;0t\r[?1l>]0;tum_dbd0378@hkn1990:~/jafar]633;D;0",,terminal_output
|
15 |
+
14,37829,"TERMINAL",0,0,"bash",,terminal_focus
|
16 |
+
15,45022,"TERMINAL",0,0,"salloc --nodes=4 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=8 --time=10:00:00--partition=accelerated --account=hk-project-p0023960",,terminal_command
|
17 |
+
16,45069,"TERMINAL",0,0,"]633;E;2025-07-06 11:42:35 salloc --nodes=4 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=8 --time=10:00:00 --partition=accelerated --account=hk-project-p0023960;c1dd7136-d3d1-4c6f-9fc5-0bf4d8707279]633;Csalloc: Granted job allocation 3321167\r\n",,terminal_output
|
18 |
+
17,45181,"TERMINAL",0,0,"salloc: Waiting for resource configuration\r\n",,terminal_output
|
19 |
+
18,72241,"TERMINAL",0,0,"salloc: Nodes hkn[0627,0629,0634-0635] are ready for job\r\n",,terminal_output
|
20 |
+
19,73370,"TERMINAL",0,0,"]0;tum_dbd0378@hkn0627:~/jafar[?2004h[tum_dbd0378@hkn0627 jafar]$ ",,terminal_output
|
21 |
+
20,96077,"TERMINAL",0,0,"[?25lb[2ma[22m[2ms[22m[6;33H[?25h[?25l[6;31Ha[6;33H[?25h[?25l[6;32Hs[6;33H[?25h",,terminal_output
|
22 |
+
21,96210,"TERMINAL",0,0,"[?25l[6;33Hh[6;34H[?25h",,terminal_output
|
23 |
+
22,96372,"TERMINAL",0,0,"[?25l[6;34H [6;36H[?25h[?25l[6;35He[6;36H[?25h",,terminal_output
|
24 |
+
23,96581,"TERMINAL",0,0,"[?25l[6;36Hx[6;38H[?25h[?25l[6;37Hp[6;38H[?25h",,terminal_output
|
25 |
+
24,96680,"TERMINAL",0,0,"[?25l[6;38He[6;41H[?25h",,terminal_output
|
26 |
+
25,96801,"TERMINAL",0,0,"[?25l[6;39Hr[6;42H[?25h[?25l[6;40Hi[6;42H[?25h[?25l[6;41Hm[6;42H[?25h",,terminal_output
|
27 |
+
26,96897,"TERMINAL",0,0,"ents/",,terminal_output
|
28 |
+
27,98298,"TERMINAL",0,0,"[?25l[6;47Ht[6;49H[?25h",,terminal_output
|
29 |
+
28,98408,"TERMINAL",0,0,"[?25l[6;48Ho[6;50H[?25h[?25l[6;49Hk[6;50H[?25h",,terminal_output
|
30 |
+
29,98556,"TERMINAL",0,0,"enizer_",,terminal_output
|
31 |
+
30,98652,"TERMINAL",0,0,"d",,terminal_output
|
32 |
+
31,98819,"TERMINAL",0,0,"ummy_",,terminal_output
|
33 |
+
32,98930,"TERMINAL",0,0,"d",,terminal_output
|
34 |
+
33,99017,"TERMINAL",0,0,"istributed_checkpointing_test.sh ",,terminal_output
|
35 |
+
34,99285,"TERMINAL",0,0,"[?25l[?2004l\r[?25h",,terminal_output
|
36 |
+
35,99604,"TERMINAL",0,0,"GpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\nGpuFreq=control_disabled\r\n",,terminal_output
|
37 |
+
36,123866,"TERMINAL",0,0,"2025-07-06 11:43:54.542770: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-06 11:43:54.542770: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-06 11:43:54.542769: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-06 11:43:54.542771: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-06 11:43:54.547592: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-06 11:43:54.547601: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-06 11:43:54.547595: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-06 11:43:54.547600: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-06 11:43:54.551642: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-06 11:43:54.551644: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-06 11:43:54.551640: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-06 11:43:54.551643: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-06 11:43:54.578770: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-06 11:43:54.578768: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-06 11:43:54.578765: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n2025-07-06 11:43:54.578767: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n",,terminal_output
|
38 |
+
37,123932,"TERMINAL",0,0,"WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751795034.643272 1077715 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751795034.643471 2175333 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751795034.643395 2393636 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751795034.643356 2175334 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751795034.643340 2393638 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751795034.643559 2175335 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751795034.643531 1077716 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751795034.643682 2175332 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751795034.643490 3222635 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751795034.643735 2393637 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751795034.643520 1077717 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751795034.643598 3222637 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751795034.643851 2393639 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before 
absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751795034.643466 3222638 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751795034.643769 1077718 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751795034.643694 3222636 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1751795034.649011 2393636 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751795034.649035 2393637 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751795034.649013 2393638 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751795034.649014 2393639 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751795034.649244 1077715 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751795034.649237 1077716 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751795034.649241 1077717 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751795034.649248 1077718 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751795034.649546 2175332 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751795034.649526 3222635 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751795034.649542 2175333 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751795034.649526 3222636 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751795034.649543 2175335 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751795034.649523 3222637 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751795034.649528 3222638 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\nE0000 00:00:1751795034.649786 2175334 cuda_blas.cc:1407] Unable to 
register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\n",,terminal_output
|
39 |
+
38,124232,"TERMINAL",0,0,"W0000 00:00:1751795034.890817 2393636 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.890854 2393636 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.890857 2393636 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.890859 2393636 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.890815 2393637 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.890856 2393637 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.890858 2393637 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.890860 2393637 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.890818 2393638 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.890855 2393638 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.890857 2393638 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.890859 2393638 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.890811 2393639 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.890849 2393639 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.890851 2393639 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.890854 2393639 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.891848 3222635 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.891873 3222635 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.891876 3222635 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.891878 3222635 computation_placer.cc:177] computation placer already registered. 
Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.891853 3222636 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.891877 3222636 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.891879 3222636 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.891881 3222636 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.891851 3222637 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.891875 3222637 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.891877 3222637 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.891879 3222637 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.891850 3222638 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.891878 3222638 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.891880 3222638 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.891881 3222638 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.894806 1077715 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.894832 1077715 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.894834 1077715 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.894836 1077715 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.894811 1077716 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.894836 1077716 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.894838 1077716 computation_placer.cc:177] computation placer already registered. 
Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.894839 1077716 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.894805 1077717 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.894829 1077717 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.894831 1077717 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.894833 1077717 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.894813 1077718 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.894836 1077718 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.894838 1077718 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.894839 1077718 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.895987 2175332 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.896009 2175332 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.896011 2175332 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.896012 2175332 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.895983 2175334 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.896009 2175334 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.896011 2175334 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.896013 2175334 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.895989 2175335 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.896012 2175335 computation_placer.cc:177] computation placer already registered. 
Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.896014 2175335 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.896016 2175335 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.896076 2175333 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.896091 2175333 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.896093 2175333 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751795034.896094 2175333 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output
|
40 |
+
39,155651,"TERMINAL",0,0,"W0000 00:00:1751795066.316761 3222638 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751795066.317314 2393637 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751795066.317253 2393638 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751795066.317404 1077715 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751795066.317256 2393639 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751795066.317426 1077716 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751795066.317366 2393636 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751795066.317406 1077717 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751795066.317437 1077718 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751795066.317375 3222637 gpu_device.cc:2341] Cannot dlopen some GPU libraries. 
Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751795066.317752 3222635 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751795066.317703 3222636 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751795066.318242 2175332 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751795066.318143 2175333 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751795066.318166 2175335 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\nW0000 00:00:1751795066.318265 2175334 gpu_device.cc:2341] Cannot dlopen some GPU libraries. Please make sure the missing libraries mentioned above are installed properly if you would like to use GPU. Follow the guide at https://www.tensorflow.org/install/gpu for how to download and setup the required libraries for your platform.\r\nSkipping registering GPU devices...\r\n",,terminal_output
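The `Cannot dlopen some GPU libraries ... Skipping registering GPU devices` warnings above are emitted by TensorFlow, which this job appears to use only for the TFRecord input pipeline; by themselves they do not mean the accelerators are unavailable to JAX. A minimal, hypothetical sanity check (standard `jax`/`tf.config` calls, but the split of responsibilities between the two frameworks is an assumption):

```python
# Hypothetical sanity check: confirm JAX still sees the accelerators even though
# TensorFlow (assumed to be used only for data loading here) skipped GPU registration.
import jax
import tensorflow as tf

# Keep TensorFlow on CPU so it does not compete with JAX for GPU memory.
tf.config.set_visible_devices([], "GPU")

print("JAX backend:", jax.default_backend())   # e.g. "gpu"
print("Local devices:", jax.local_devices())   # accelerators owned by this process
```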
|
41 |
+
40,158511,"TERMINAL",0,0,"Running on 16 devices.\r\nRunning on 16 devices.\r\nRunning on 16 devices.\r\nRunning on 16 devices.\r\nRunning on 16 devices.\r\nRunning on 16 devices.\r\nRunning on 16 devices.\r\nRunning on 16 devices.\r\nRunning on 16 devices.\r\nRunning on 16 devices.\r\nRunning on 16 devices.\r\nRunning on 16 devices.\r\nRunning on 16 devices.\r\nRunning on 16 devices.\r\nRunning on 16 devices.\r\nRunning on 16 devices.\r\n",,terminal_output
|
42 |
+
41,181841,"TERMINAL",0,0,"Counting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\n",,terminal_output
|
43 |
+
42,181914,"TERMINAL",0,0,"Counting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\n",,terminal_output
|
44 |
+
43,182034,"TERMINAL",0,0,"Counting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\n",,terminal_output
|
45 |
+
44,182357,"TERMINAL",0,0,"Counting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\n",,terminal_output
|
46 |
+
45,182554,"TERMINAL",0,0,"Counting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\nCounting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\n",,terminal_output
|
47 |
+
46,182644,"TERMINAL",0,0,"Counting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\n",,terminal_output
|
48 |
+
47,182723,"TERMINAL",0,0,"Counting all components: ['encoder', 'vq', 'decoder']\r\nParameter counts:\r\n{'encoder': 18978432, 'vq': 32768, 'decoder': 18978416, 'total': 37989616}\r\n",,terminal_output
|
49 |
+
48,187178,"TERMINAL",0,0,"Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/train_tokenizer.py"", line 216, in <module>\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/train_tokenizer.py"", line 216, in <module>\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/train_tokenizer.py"", line 216, in <module>\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/train_tokenizer.py"", line 216, in <module>\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/train_tokenizer.py"", line 216, in <module>\r\n for x in os.listdir(args.data_dir)\r\nFileNotFoundError: [Errno 2] No such file or directory: 'data_tfrecords/coinrun'\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/train_tokenizer.py"", line 216, in <module>\r\n for x in os.listdir(args.data_dir)\r\nFileNotFoundError: [Errno 2] No such file or directory: 'data_tfrecords/coinrun'\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/train_tokenizer.py"", line 216, in <module>\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/train_tokenizer.py"", line 216, in <module>\r\n for x in os.listdir(args.data_dir)\r\nFileNotFoundError: [Errno 2] No such file or directory: 'data_tfrecords/coinrun'\r\n for x in os.listdir(args.data_dir)\r\nFileNotFoundError: [Errno 2] No such file or directory: 'data_tfrecords/coinrun'\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/train_tokenizer.py"", line 216, in <module>\r\n for x in os.listdir(args.data_dir)\r\nFileNotFoundError: [Errno 2] No such file or directory: 'data_tfrecords/coinrun'\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/train_tokenizer.py"", line 216, in <module>\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/train_tokenizer.py"", line 216, in <module>\r\n for x in os.listdir(args.data_dir)\r\nFileNotFoundError: [Errno 2] No such file or directory: 'data_tfrecords/coinrun'\r\n for x in os.listdir(args.data_dir)\r\nFileNotFoundError: [Errno 2] No such file or directory: 'data_tfrecords/coinrun'\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/train_tokenizer.py"", line 216, in <module>\r\n for x in os.listdir(args.data_dir)\r\nFileNotFoundError: [Errno 2] No such file or directory: 'data_tfrecords/coinrun'\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/train_tokenizer.py"", line 216, in <module>\r\n for x in os.listdir(args.data_dir)\r\nFileNotFoundError: [Errno 2] No such file or directory: 'data_tfrecords/coinrun'\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/train_tokenizer.py"", line 216, in <module>\r\n for x in os.listdir(args.data_dir)\r\nFileNotFoundError: [Errno 2] No such file or directory: 'data_tfrecords/coinrun'\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/train_tokenizer.py"", line 216, in <module>\r\n for x in 
os.listdir(args.data_dir)\r\nFileNotFoundError: [Errno 2] No such file or directory: 'data_tfrecords/coinrun'\r\n for x in os.listdir(args.data_dir)\r\nFileNotFoundError: [Errno 2] No such file or directory: 'data_tfrecords/coinrun'\r\n for x in os.listdir(args.data_dir)\r\nFileNotFoundError: [Errno 2] No such file or directory: 'data_tfrecords/coinrun'\r\nTraceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/train_tokenizer.py"", line 216, in <module>\r\n for x in os.listdir(args.data_dir)\r\nFileNotFoundError: [Errno 2] No such file or directory: 'data_tfrecords/coinrun'\r\n for x in os.listdir(args.data_dir)\r\nFileNotFoundError: [Errno 2] No such file or directory: 'data_tfrecords/coinrun'\r\n for x in os.listdir(args.data_dir)\r\nFileNotFoundError: [Errno 2] No such file or directory: 'data_tfrecords/coinrun'\r\n",,terminal_output
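Every task dies with the same `FileNotFoundError` because `args.data_dir` (`data_tfrecords/coinrun`) is a relative path that does not exist in the working directory on the compute nodes. A hypothetical guard around the failing `os.listdir` call; `args` is the parsed CLI namespace from `train_tokenizer.py`, and everything else is illustrative:

```python
# Hypothetical guard: fail fast with one actionable message instead of
# sixteen identical tracebacks when the data directory is missing.
import os
import sys

data_dir = os.path.abspath(args.data_dir)  # args: parsed CLI namespace of the script
if not os.path.isdir(data_dir):
    sys.exit(
        f"Data directory not found: {data_dir!r}. "
        "Point data_dir at a path that is visible from the compute nodes."
    )
tfrecord_files = sorted(os.path.join(data_dir, x) for x in os.listdir(data_dir))
```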
|
50 |
+
49,189095,"TERMINAL",0,0,"srun: error: hkn0627: tasks 0,2: Exited with exit code 1\r\n",,terminal_output
|
51 |
+
50,189143,"TERMINAL",0,0,"srun: error: hkn0635: tasks 12,14-15: Exited with exit code 1\r\n",,terminal_output
|
52 |
+
51,189374,"TERMINAL",0,0,"srun: error: hkn0627: tasks 1,3: Exited with exit code 1\r\n",,terminal_output
|
53 |
+
52,189408,"TERMINAL",0,0,"srun: error: hkn0635: task 13: Exited with exit code 1\r\n",,terminal_output
|
54 |
+
53,191029,"TERMINAL",0,0,"srun: error: hkn0634: tasks 8,10: Exited with exit code 1\r\n",,terminal_output
|
55 |
+
54,191388,"TERMINAL",0,0,"srun: error: hkn0634: task 11: Exited with exit code 1\r\nsrun: error: hkn0629: tasks 4,6-7: Exited with exit code 1\r\nsrun: error: hkn0634: task 9: Exited with exit code 1\r\nsrun: error: hkn0629: task 5: Exited with exit code 1\r\nexperiments/tokenizer_dummy_distributed_checkpointing_test.sh: line 14: --log_checkpoint_interval: command not found\r\n]0;tum_dbd0378@hkn0627:~/jafar[?2004h[tum_dbd0378@hkn0627 jafar]$ ",,terminal_output
|
56 |
+
55,6242217,"experiments/tokenizer_dummy_distributed_checkpointing_test.sh",0,0,"Switched from branch 'fix-cross-node-checkpointing' to 'main'",shellscript,git_branch_checkout
|
57 |
+
56,6297228,"experiments/tokenizer_dummy_distributed_checkpointing_test.sh",0,0,"Switched from branch 'main' to 'fix-checkpoint-ts-deadlock'",shellscript,git_branch_checkout
|
58 |
+
57,8897497,"experiments/tokenizer_dummy_distributed_checkpointing_test.sh",0,0,"Switched from branch 'fix-checkpoint-ts-deadlock' to 'main'",shellscript,git_branch_checkout
|
59 |
+
58,8907494,"experiments/tokenizer_dummy_distributed_checkpointing_test.sh",0,0,"Switched from branch 'main' to 'dynamics-lam-co-training'",shellscript,git_branch_checkout
|
60 |
+
59,10032590,"experiments/tokenizer_dummy_distributed_checkpointing_test.sh",0,0,"Switched from branch 'dynamics-lam-co-training' to 'main'",shellscript,git_branch_checkout
|
61 |
+
60,10162629,"experiments/tokenizer_dummy_distributed_checkpointing_test.sh",0,0,"Switched from branch 'main' to 'dynamics-lam-co-training'",shellscript,git_branch_checkout
|
62 |
+
61,10192617,"experiments/tokenizer_dummy_distributed_checkpointing_test.sh",0,0,"Switched from branch 'dynamics-lam-co-training' to 'main'",shellscript,git_branch_checkout
|
63 |
+
62,10247633,"experiments/tokenizer_dummy_distributed_checkpointing_test.sh",0,0,"Switched from branch 'main' to 'make-warmup-default'",shellscript,git_branch_checkout
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-4d1a0deb-e1f5-442d-9c20-e0948c9dbbd21751438388691-2025_07_02-08.40.20.700/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-53035602-cd5a-4dad-bc79-2cb4d8d4f7681751162692203-2025_06_28-19.04.53.413/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-53473ec0-46ca-440e-9f03-b85aa9497ebf1751358924978-2025_07_01-10.36.06.808/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-53a6d9e8-1004-4f9b-86d8-e54e29f62e511751401053914-2025_07_01-22.18.23.766/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-5b1a6152-1602-4538-a4b1-6fa9507221151753212707189-2025_07_22-21.32.36.855/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-5e81a867-cde0-4985-b3ad-048a43f856df1751310142803-2025_06_30-12.02.24.285/source.csv
ADDED
@@ -0,0 +1,79 @@
1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
2 |
+
1,2,"tasks",0,0,"",Log,tab
|
3 |
+
2,34,"models/tokenizer.py",0,0,"from typing import Dict, Any, Tuple\n\nimport flax.linen as nn\n\nfrom utils.preprocess import patchify, unpatchify\nfrom utils.nn import STTransformer, VectorQuantizer\n\n\nclass TokenizerVQVAE(nn.Module):\n """"""ST-ViVit VQ-VAE""""""\n\n in_dim: int\n model_dim: int\n latent_dim: int\n num_latents: int\n patch_size: int\n num_blocks: int\n num_heads: int\n dropout: float\n codebook_dropout: float\n\n def setup(self):\n self.encoder = STTransformer(\n self.model_dim,\n self.latent_dim,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n )\n self.vq = VectorQuantizer(\n self.latent_dim,\n self.num_latents,\n self.codebook_dropout,\n )\n self.out_dim = self.in_dim * self.patch_size**2\n self.decoder = STTransformer(\n self.model_dim,\n self.out_dim,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n H, W = batch[""videos""].shape[2:4]\n outputs = self.vq_encode(batch[""videos""], training)\n recon = self.decoder(outputs[""z_q""]) # (B, T, H_down * W_down, C)\n recon = nn.sigmoid(recon)\n outputs[""recon""] = unpatchify(recon, self.patch_size, H, W)\n return outputs\n\n def vq_encode(self, videos: Any, training: bool = True) -> Dict[str, Any]:\n # --- Preprocess + encode ---\n B, T = videos.shape[:2]\n x = patchify(videos, self.patch_size)\n N = x.shape[2]\n x = self.encoder(x) # (B, T, N, E)\n\n # --- Vector quantize ---\n x = x.reshape(B * T * N, self.latent_dim)\n z_q, z, emb, indices = self.vq(x, training)\n z_q = z_q.reshape(B, T, N, self.latent_dim)\n indices = indices.reshape(B, T, N)\n return dict(z_q=z_q, z=z, emb=emb, indices=indices)\n\n def decode(self, indices: Any, video_hw: Tuple[int, int]):\n z = self.vq.codebook[indices]\n recon = self.decoder(z)\n recon = nn.sigmoid(recon)\n return unpatchify(recon, self.patch_size, *video_hw)\n",python,tab
|
4 |
+
3,47,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
5 |
+
4,68,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"12:02:24 PM [info] Activating crowd-code\n12:02:24 PM [info] Recording started\n12:02:24 PM [info] Initializing git provider using file system watchers...\n12:02:24 PM [info] Git repository found\n12:02:24 PM [info] Git provider initialized successfully\n",Log,content
|
6 |
+
5,86,"extension-output-pdoom-org.crowd-code-#1-crowd-code",250,0,"12:02:24 PM [info] Initial git state: [object Object]\n",Log,content
|
7 |
+
6,1994,"models/tokenizer.py",0,0,"",python,tab
|
8 |
+
7,2010,"TERMINAL",0,0,"",,terminal_focus
|
9 |
+
8,9708,"requirements.txt",0,0,"dm_pix>=0.4.3\neinops>=0.8.0\nflax>=0.8.5\njax>=0.4.30\noptax>=0.2.3\n# procgen>=0.10.7\ntyro>=0.8.5\nwandb>=0.17.4\ntensorflow>=2.1\npre-commit>=4.2.0",pip-requirements,tab
|
10 |
+
9,10459,"requirements.txt",55,0,"",pip-requirements,selection_command
|
11 |
+
10,10700,"requirements.txt",68,0,"",pip-requirements,selection_command
|
12 |
+
11,10732,"requirements.txt",86,0,"",pip-requirements,selection_command
|
13 |
+
12,10767,"requirements.txt",98,0,"",pip-requirements,selection_command
|
14 |
+
13,10799,"requirements.txt",112,0,"",pip-requirements,selection_command
|
15 |
+
14,10834,"requirements.txt",128,0,"",pip-requirements,selection_command
|
16 |
+
15,11032,"requirements.txt",112,0,"",pip-requirements,selection_command
|
17 |
+
16,11198,"requirements.txt",98,0,"",pip-requirements,selection_command
|
18 |
+
17,150706,"vscode.git.Git",0,0,"2025-06-30 12:02:18.847 [info] [main] Log level: Info\n2025-06-30 12:02:18.854 [info] [main] Validating found git in: ""/usr/bin/git""\n2025-06-30 12:02:18.881 [info] [main] Using git ""2.39.5 (Apple Git-154)"" from ""/usr/bin/git""\n2025-06-30 12:02:18.881 [info] [Model][doInitialScan] Initial repository scan started\n2025-06-30 12:02:18.883 [info] [Model][doInitialScan] Initial repository scan completed - repositories (0), closed repositories (0), parent repositories (0), unsafe repositories (0)\n2025-06-30 12:02:19.417 [info] > git symbolic-ref --short refs/remotes/origin/HEAD [14ms]\n2025-06-30 12:02:19.417 [info] fatal: not a git repository (or any of the parent directories): .git\n2025-06-30 12:02:19.427 [info] > git rev-parse --verify origin/main [9ms]\n2025-06-30 12:02:19.427 [info] fatal: not a git repository (or any of the parent directories): .git\n2025-06-30 12:02:19.437 [info] > git rev-parse --verify origin/master [10ms]\n2025-06-30 12:02:19.437 [info] fatal: not a git repository (or any of the parent directories): .git\n2025-06-30 12:02:19.446 [info] > git rev-parse --verify origin/develop [9ms]\n2025-06-30 12:02:19.446 [info] fatal: not a git repository (or any of the parent directories): .git\n2025-06-30 12:02:19.454 [info] > git branch -r [7ms]\n2025-06-30 12:02:19.454 [info] fatal: not a git repository (or any of the parent directories): .git\n2025-06-30 12:02:19.464 [info] > git config --get init.defaultBranch [9ms]\n2025-06-30 12:02:19.833 [info] > git status [13ms]\n2025-06-30 12:02:19.833 [info] fatal: not a git repository (or any of the parent directories): .git\n2025-06-30 12:02:23.888 [info] [main] Log level: Info\n2025-06-30 12:02:23.891 [info] [main] Validating found git in: ""/usr/bin/git""\n2025-06-30 12:02:23.910 [info] [main] Using git ""2.39.5 (Apple Git-154)"" from ""/usr/bin/git""\n2025-06-30 12:02:23.910 [info] [Model][doInitialScan] Initial repository scan started\n2025-06-30 12:02:24.289 [info] > git rev-parse --show-toplevel [373ms]\n2025-06-30 12:02:24.341 [info] > git rev-parse --git-dir --git-common-dir [21ms]\n2025-06-30 12:02:24.346 [info] [Model][openRepository] Opened repository: /Users/franzsrambical/Documents/pdoom/jafar\n2025-06-30 12:02:24.366 [info] > git rev-parse --show-toplevel [18ms]\n2025-06-30 12:02:24.366 [info] > git config --get commit.template [18ms]\n2025-06-30 12:02:24.377 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [25ms]\n2025-06-30 12:02:24.382 [info] > git rev-parse --show-toplevel [14ms]\n2025-06-30 12:02:24.392 [info] > git rev-parse --show-toplevel [10ms]\n2025-06-30 12:02:24.393 [info] > git status -z -uall [14ms]\n2025-06-30 12:02:24.401 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [20ms]\n2025-06-30 12:02:24.427 [info] > git config --get commit.template [21ms]\n2025-06-30 12:02:24.427 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [23ms]\n2025-06-30 12:02:24.428 [info] > git rev-parse --show-toplevel [36ms]\n2025-06-30 12:02:24.441 [info] > git config --local branch.main.vscode-merge-base [13ms]\n2025-06-30 12:02:24.444 [info] > git rev-parse --show-toplevel [15ms]\n2025-06-30 12:02:24.447 [info] > git for-each-ref 
--format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [18ms]\n2025-06-30 12:02:24.470 [info] > git rev-parse --show-toplevel [23ms]\n2025-06-30 12:02:24.471 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/origin/main refs/remotes/origin/main [29ms]\n2025-06-30 12:02:24.472 [info] > git status -z -uall [24ms]\n2025-06-30 12:02:24.477 [info] > git symbolic-ref --short refs/remotes/origin/HEAD [25ms]\n2025-06-30 12:02:24.482 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [32ms]\n2025-06-30 12:02:24.505 [info] > git merge-base refs/heads/main refs/remotes/origin/main [34ms]\n2025-06-30 12:02:24.511 [info] > git rev-parse --show-toplevel [37ms]\n2025-06-30 12:02:24.516 [info] > git merge-base refs/heads/main refs/remotes/origin/main [32ms]\n2025-06-30 12:02:24.539 [info] > git diff --name-status -z --diff-filter=ADMR 15aa06ec6d6ba69a2d123d5e28b918fec749829a...refs/remotes/origin/main [33ms]\n2025-06-30 12:02:24.545 [info] > git rev-parse --show-toplevel [30ms]\n2025-06-30 12:02:24.546 [info] [Model][doInitialScan] Initial repository scan completed - repositories (1), closed repositories (0), parent repositories (0), unsafe repositories (0)\n2025-06-30 12:02:24.549 [info] > git diff --name-status -z --diff-filter=ADMR 15aa06ec6d6ba69a2d123d5e28b918fec749829a...refs/remotes/origin/main [32ms]\n2025-06-30 12:02:24.573 [info] > git rev-parse --show-toplevel [24ms]\n2025-06-30 12:02:24.700 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [31ms]\n2025-06-30 12:02:24.701 [info] > git show --textconv :models/tokenizer.py [34ms]\n2025-06-30 12:02:24.733 [info] > git cat-file -s 049e7d1bae78714245c689dc80e9962d0997f2af [32ms]\n2025-06-30 12:02:24.833 [info] > git status [41ms]\n2025-06-30 12:02:24.904 [info] > git check-ignore -v -z --stdin [46ms]\n2025-06-30 12:02:25.584 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [12ms]\n2025-06-30 12:02:25.594 [info] > git cat-file -s 049e7d1bae78714245c689dc80e9962d0997f2af [10ms]\n2025-06-30 12:02:25.607 [info] > git show --textconv :models/tokenizer.py [9ms]\n2025-06-30 12:02:26.301 [info] > git blame --root --incremental 15aa06ec6d6ba69a2d123d5e28b918fec749829a -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [22ms]\n2025-06-30 12:02:28.510 [info] > git fetch [4165ms]\n2025-06-30 12:02:28.510 [info] From github.com:p-doom/jafar\n 15aa06e..a537770 main -> origin/main\n * [new branch] convert-to-jax-array-in-iter -> origin/convert-to-jax-array-in-iter\n * [new branch] feat/explicit-image-dims -> origin/feat/explicit-image-dims\n d95ca45..a97a559 feature/model-parameter-count-utils -> origin/feature/model-parameter-count-utils\n * [new branch] fix_preprocess_video_paths -> origin/fix_preprocess_video_paths\n2025-06-30 12:02:28.521 [info] > git config --get commit.template [10ms]\n2025-06-30 12:02:28.524 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [12ms]\n2025-06-30 12:02:28.537 [info] > git status -z -uall [13ms]\n2025-06-30 12:02:28.539 [info] > git for-each-ref --sort -committerdate --format %(refname) 
%(objectname) %(*objectname) [13ms]\n2025-06-30 12:02:28.552 [info] > git merge-base refs/heads/main refs/remotes/origin/main [12ms]\n2025-06-30 12:02:28.563 [info] > git diff --name-status -z --diff-filter=ADMR 15aa06ec6d6ba69a2d123d5e28b918fec749829a...refs/remotes/origin/main [10ms]\n2025-06-30 12:02:29.712 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [21ms]\n2025-06-30 12:02:29.730 [info] > git cat-file -s 049e7d1bae78714245c689dc80e9962d0997f2af [17ms]\n2025-06-30 12:02:29.748 [info] > git show --textconv :models/tokenizer.py [16ms]\n2025-06-30 12:02:31.082 [info] > git fetch --all [3398ms]\n2025-06-30 12:02:31.102 [info] > git config --get commit.template [17ms]\n2025-06-30 12:02:31.106 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [19ms]\n2025-06-30 12:02:31.120 [info] > git status -z -uall [14ms]\n2025-06-30 12:02:31.122 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [15ms]\n2025-06-30 12:02:32.020 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [26ms]\n2025-06-30 12:02:32.039 [info] > git cat-file -s 049e7d1bae78714245c689dc80e9962d0997f2af [19ms]\n2025-06-30 12:02:32.054 [info] > git show --textconv :models/tokenizer.py [14ms]\n2025-06-30 12:02:33.933 [info] > git config --global user.name [24ms]\n2025-06-30 12:02:33.951 [info] > git config --global user.email [17ms]\n2025-06-30 12:02:33.951 [info] [main] Stored git author name in global state: Franz Srambical <[email protected]>\n2025-06-30 12:02:34.206 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [10ms]\n2025-06-30 12:02:34.207 [info] > git show --textconv :requirements.txt [11ms]\n2025-06-30 12:02:34.217 [info] > git cat-file -s 3e3443dda2156a35a1a6687c9d355ac43133ad47 [10ms]\n2025-06-30 12:02:34.234 [info] > git blame --root --incremental 15aa06ec6d6ba69a2d123d5e28b918fec749829a -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [14ms]\n2025-06-30 12:02:34.534 [info] > git check-ignore -v -z --stdin [18ms]\n2025-06-30 12:02:34.884 [info] > git config --get commit.template [18ms]\n2025-06-30 12:02:34.888 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [22ms]\n2025-06-30 12:02:34.902 [info] > git status -z -uall [13ms]\n2025-06-30 12:02:34.904 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [15ms]\n2025-06-30 12:02:34.976 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [13ms]\n2025-06-30 12:02:34.992 [info] > git cat-file -s 3e3443dda2156a35a1a6687c9d355ac43133ad47 [16ms]\n2025-06-30 12:02:35.019 [info] > git show --textconv :requirements.txt [25ms]\n2025-06-30 12:02:35.023 [info] > git status [21ms]\n2025-06-30 12:04:43.429 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [26ms]\n2025-06-30 12:04:43.444 [info] > git cat-file -s 3e3443dda2156a35a1a6687c9d355ac43133ad47 [13ms]\n2025-06-30 12:04:43.459 [info] > git show --textconv :requirements.txt [13ms]\n2025-06-30 12:04:45.656 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) 
[30ms]\n2025-06-30 12:04:46.302 [info] > git fetch [4119ms]\n2025-06-30 12:04:46.302 [info] From github.com:p-doom/jafar\n * [new branch] grain-dataloader -> origin/grain-dataloader\n2025-06-30 12:04:46.315 [info] > git config --get commit.template [11ms]\n2025-06-30 12:04:46.319 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [16ms]\n2025-06-30 12:04:46.331 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [11ms]\n2025-06-30 12:04:46.332 [info] > git status -z -uall [13ms]\n2025-06-30 12:04:47.421 [info] > git config --get commit.template [28ms]\n2025-06-30 12:04:47.425 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [30ms]\n2025-06-30 12:04:47.440 [info] > git status -z -uall [14ms]\n2025-06-30 12:04:47.442 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [15ms]\n2025-06-30 12:04:47.507 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [13ms]\n2025-06-30 12:04:47.518 [info] > git cat-file -s 3e3443dda2156a35a1a6687c9d355ac43133ad47 [11ms]\n2025-06-30 12:04:47.530 [info] > git show --textconv :requirements.txt [11ms]\n2025-06-30 12:04:49.187 [info] > git log --oneline --cherry main...main@{upstream} -- [17ms]\n2025-06-30 12:04:50.433 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [28ms]\n2025-06-30 12:04:50.448 [info] > git cat-file -s 3e3443dda2156a35a1a6687c9d355ac43133ad47 [14ms]\n2025-06-30 12:04:50.462 [info] > git show --textconv :requirements.txt [12ms]\n2025-06-30 12:04:53.045 [info] > git pull --tags origin main [3857ms]\n2025-06-30 12:04:53.045 [info] From github.com:p-doom/jafar\n * branch main -> FETCH_HEAD\nerror: Your local changes to the following files would be overwritten by merge:\n\trequirements.txt\nPlease commit your changes or stash them before you merge.\nAborting\n2025-06-30 12:04:53.062 [info] > git config --get commit.template [14ms]\n2025-06-30 12:04:53.064 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [16ms]\n2025-06-30 12:04:53.077 [info] > git status -z -uall [12ms]\n2025-06-30 12:04:53.078 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [12ms]\n2025-06-30 12:04:54.214 [info] > git config --get commit.template [18ms]\n2025-06-30 12:04:54.217 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [19ms]\n2025-06-30 12:04:54.231 [info] > git status -z -uall [13ms]\n2025-06-30 12:04:54.232 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [14ms]\n2025-06-30 12:04:54.308 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [13ms]\n2025-06-30 12:04:54.319 [info] > git cat-file -s 3e3443dda2156a35a1a6687c9d355ac43133ad47 [11ms]\n2025-06-30 12:04:54.330 [info] > git show --textconv :requirements.txt [10ms]\n",log,tab
|
19 |
+
18,151217,"vscode.git.Git",14002,0,"2025-06-30 12:04:55.496 [info] > git check-ignore -v -z --stdin [21ms]\n",log,content
|
20 |
+
19,151739,"requirements.txt",0,0,"",pip-requirements,tab
|
21 |
+
20,152857,"requirements.txt",0,0,"",pip-requirements,tab
|
22 |
+
21,152869,"requirements.txt",40,0,"",pip-requirements,selection_command
|
23 |
+
22,157919,"requirements.txt",40,43,"jax[cuda12]>=0.4.30\noptax>=0.2.3\nprocgen>=0.10.7\n",pip-requirements,content
|
24 |
+
23,158930,"requirements.txt",0,0,"",pip-requirements,tab
|
25 |
+
24,164840,"requirements.txt",148,0,"\ntqdm>=4.67.1",pip-requirements,content
|
26 |
+
25,166157,"models/tokenizer.py",0,0,"",python,tab
|
27 |
+
26,170155,"models/tokenizer.py",0,0,"Switched from branch 'main' to 'grain-dataloader'",python,git_branch_checkout
|
28 |
+
27,172580,"vscode.git.Git",0,0,"2025-06-30 12:02:18.847 [info] [main] Log level: Info\n2025-06-30 12:02:18.854 [info] [main] Validating found git in: ""/usr/bin/git""\n2025-06-30 12:02:18.881 [info] [main] Using git ""2.39.5 (Apple Git-154)"" from ""/usr/bin/git""\n2025-06-30 12:02:18.881 [info] [Model][doInitialScan] Initial repository scan started\n2025-06-30 12:02:18.883 [info] [Model][doInitialScan] Initial repository scan completed - repositories (0), closed repositories (0), parent repositories (0), unsafe repositories (0)\n2025-06-30 12:02:19.417 [info] > git symbolic-ref --short refs/remotes/origin/HEAD [14ms]\n2025-06-30 12:02:19.417 [info] fatal: not a git repository (or any of the parent directories): .git\n2025-06-30 12:02:19.427 [info] > git rev-parse --verify origin/main [9ms]\n2025-06-30 12:02:19.427 [info] fatal: not a git repository (or any of the parent directories): .git\n2025-06-30 12:02:19.437 [info] > git rev-parse --verify origin/master [10ms]\n2025-06-30 12:02:19.437 [info] fatal: not a git repository (or any of the parent directories): .git\n2025-06-30 12:02:19.446 [info] > git rev-parse --verify origin/develop [9ms]\n2025-06-30 12:02:19.446 [info] fatal: not a git repository (or any of the parent directories): .git\n2025-06-30 12:02:19.454 [info] > git branch -r [7ms]\n2025-06-30 12:02:19.454 [info] fatal: not a git repository (or any of the parent directories): .git\n2025-06-30 12:02:19.464 [info] > git config --get init.defaultBranch [9ms]\n2025-06-30 12:02:19.833 [info] > git status [13ms]\n2025-06-30 12:02:19.833 [info] fatal: not a git repository (or any of the parent directories): .git\n2025-06-30 12:02:23.888 [info] [main] Log level: Info\n2025-06-30 12:02:23.891 [info] [main] Validating found git in: ""/usr/bin/git""\n2025-06-30 12:02:23.910 [info] [main] Using git ""2.39.5 (Apple Git-154)"" from ""/usr/bin/git""\n2025-06-30 12:02:23.910 [info] [Model][doInitialScan] Initial repository scan started\n2025-06-30 12:02:24.289 [info] > git rev-parse --show-toplevel [373ms]\n2025-06-30 12:02:24.341 [info] > git rev-parse --git-dir --git-common-dir [21ms]\n2025-06-30 12:02:24.346 [info] [Model][openRepository] Opened repository: /Users/franzsrambical/Documents/pdoom/jafar\n2025-06-30 12:02:24.366 [info] > git rev-parse --show-toplevel [18ms]\n2025-06-30 12:02:24.366 [info] > git config --get commit.template [18ms]\n2025-06-30 12:02:24.377 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [25ms]\n2025-06-30 12:02:24.382 [info] > git rev-parse --show-toplevel [14ms]\n2025-06-30 12:02:24.392 [info] > git rev-parse --show-toplevel [10ms]\n2025-06-30 12:02:24.393 [info] > git status -z -uall [14ms]\n2025-06-30 12:02:24.401 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [20ms]\n2025-06-30 12:02:24.427 [info] > git config --get commit.template [21ms]\n2025-06-30 12:02:24.427 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [23ms]\n2025-06-30 12:02:24.428 [info] > git rev-parse --show-toplevel [36ms]\n2025-06-30 12:02:24.441 [info] > git config --local branch.main.vscode-merge-base [13ms]\n2025-06-30 12:02:24.444 [info] > git rev-parse --show-toplevel [15ms]\n2025-06-30 12:02:24.447 [info] > git for-each-ref 
--format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [18ms]\n2025-06-30 12:02:24.470 [info] > git rev-parse --show-toplevel [23ms]\n2025-06-30 12:02:24.471 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/origin/main refs/remotes/origin/main [29ms]\n2025-06-30 12:02:24.472 [info] > git status -z -uall [24ms]\n2025-06-30 12:02:24.477 [info] > git symbolic-ref --short refs/remotes/origin/HEAD [25ms]\n2025-06-30 12:02:24.482 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [32ms]\n2025-06-30 12:02:24.505 [info] > git merge-base refs/heads/main refs/remotes/origin/main [34ms]\n2025-06-30 12:02:24.511 [info] > git rev-parse --show-toplevel [37ms]\n2025-06-30 12:02:24.516 [info] > git merge-base refs/heads/main refs/remotes/origin/main [32ms]\n2025-06-30 12:02:24.539 [info] > git diff --name-status -z --diff-filter=ADMR 15aa06ec6d6ba69a2d123d5e28b918fec749829a...refs/remotes/origin/main [33ms]\n2025-06-30 12:02:24.545 [info] > git rev-parse --show-toplevel [30ms]\n2025-06-30 12:02:24.546 [info] [Model][doInitialScan] Initial repository scan completed - repositories (1), closed repositories (0), parent repositories (0), unsafe repositories (0)\n2025-06-30 12:02:24.549 [info] > git diff --name-status -z --diff-filter=ADMR 15aa06ec6d6ba69a2d123d5e28b918fec749829a...refs/remotes/origin/main [32ms]\n2025-06-30 12:02:24.573 [info] > git rev-parse --show-toplevel [24ms]\n2025-06-30 12:02:24.700 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [31ms]\n2025-06-30 12:02:24.701 [info] > git show --textconv :models/tokenizer.py [34ms]\n2025-06-30 12:02:24.733 [info] > git cat-file -s 049e7d1bae78714245c689dc80e9962d0997f2af [32ms]\n2025-06-30 12:02:24.833 [info] > git status [41ms]\n2025-06-30 12:02:24.904 [info] > git check-ignore -v -z --stdin [46ms]\n2025-06-30 12:02:25.584 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [12ms]\n2025-06-30 12:02:25.594 [info] > git cat-file -s 049e7d1bae78714245c689dc80e9962d0997f2af [10ms]\n2025-06-30 12:02:25.607 [info] > git show --textconv :models/tokenizer.py [9ms]\n2025-06-30 12:02:26.301 [info] > git blame --root --incremental 15aa06ec6d6ba69a2d123d5e28b918fec749829a -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [22ms]\n2025-06-30 12:02:28.510 [info] > git fetch [4165ms]\n2025-06-30 12:02:28.510 [info] From github.com:p-doom/jafar\n 15aa06e..a537770 main -> origin/main\n * [new branch] convert-to-jax-array-in-iter -> origin/convert-to-jax-array-in-iter\n * [new branch] feat/explicit-image-dims -> origin/feat/explicit-image-dims\n d95ca45..a97a559 feature/model-parameter-count-utils -> origin/feature/model-parameter-count-utils\n * [new branch] fix_preprocess_video_paths -> origin/fix_preprocess_video_paths\n2025-06-30 12:02:28.521 [info] > git config --get commit.template [10ms]\n2025-06-30 12:02:28.524 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [12ms]\n2025-06-30 12:02:28.537 [info] > git status -z -uall [13ms]\n2025-06-30 12:02:28.539 [info] > git for-each-ref --sort -committerdate --format %(refname) 
%(objectname) %(*objectname) [13ms]\n2025-06-30 12:02:28.552 [info] > git merge-base refs/heads/main refs/remotes/origin/main [12ms]\n2025-06-30 12:02:28.563 [info] > git diff --name-status -z --diff-filter=ADMR 15aa06ec6d6ba69a2d123d5e28b918fec749829a...refs/remotes/origin/main [10ms]\n2025-06-30 12:02:29.712 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [21ms]\n2025-06-30 12:02:29.730 [info] > git cat-file -s 049e7d1bae78714245c689dc80e9962d0997f2af [17ms]\n2025-06-30 12:02:29.748 [info] > git show --textconv :models/tokenizer.py [16ms]\n2025-06-30 12:02:31.082 [info] > git fetch --all [3398ms]\n2025-06-30 12:02:31.102 [info] > git config --get commit.template [17ms]\n2025-06-30 12:02:31.106 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [19ms]\n2025-06-30 12:02:31.120 [info] > git status -z -uall [14ms]\n2025-06-30 12:02:31.122 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [15ms]\n2025-06-30 12:02:32.020 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [26ms]\n2025-06-30 12:02:32.039 [info] > git cat-file -s 049e7d1bae78714245c689dc80e9962d0997f2af [19ms]\n2025-06-30 12:02:32.054 [info] > git show --textconv :models/tokenizer.py [14ms]\n2025-06-30 12:02:33.933 [info] > git config --global user.name [24ms]\n2025-06-30 12:02:33.951 [info] > git config --global user.email [17ms]\n2025-06-30 12:02:33.951 [info] [main] Stored git author name in global state: Franz Srambical <[email protected]>\n2025-06-30 12:02:34.206 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [10ms]\n2025-06-30 12:02:34.207 [info] > git show --textconv :requirements.txt [11ms]\n2025-06-30 12:02:34.217 [info] > git cat-file -s 3e3443dda2156a35a1a6687c9d355ac43133ad47 [10ms]\n2025-06-30 12:02:34.234 [info] > git blame --root --incremental 15aa06ec6d6ba69a2d123d5e28b918fec749829a -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [14ms]\n2025-06-30 12:02:34.534 [info] > git check-ignore -v -z --stdin [18ms]\n2025-06-30 12:02:34.884 [info] > git config --get commit.template [18ms]\n2025-06-30 12:02:34.888 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [22ms]\n2025-06-30 12:02:34.902 [info] > git status -z -uall [13ms]\n2025-06-30 12:02:34.904 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [15ms]\n2025-06-30 12:02:34.976 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [13ms]\n2025-06-30 12:02:34.992 [info] > git cat-file -s 3e3443dda2156a35a1a6687c9d355ac43133ad47 [16ms]\n2025-06-30 12:02:35.019 [info] > git show --textconv :requirements.txt [25ms]\n2025-06-30 12:02:35.023 [info] > git status [21ms]\n2025-06-30 12:04:43.429 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [26ms]\n2025-06-30 12:04:43.444 [info] > git cat-file -s 3e3443dda2156a35a1a6687c9d355ac43133ad47 [13ms]\n2025-06-30 12:04:43.459 [info] > git show --textconv :requirements.txt [13ms]\n2025-06-30 12:04:45.656 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) 
[30ms]\n2025-06-30 12:04:46.302 [info] > git fetch [4119ms]\n2025-06-30 12:04:46.302 [info] From github.com:p-doom/jafar\n * [new branch] grain-dataloader -> origin/grain-dataloader\n2025-06-30 12:04:46.315 [info] > git config --get commit.template [11ms]\n2025-06-30 12:04:46.319 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [16ms]\n2025-06-30 12:04:46.331 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [11ms]\n2025-06-30 12:04:46.332 [info] > git status -z -uall [13ms]\n2025-06-30 12:04:47.421 [info] > git config --get commit.template [28ms]\n2025-06-30 12:04:47.425 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [30ms]\n2025-06-30 12:04:47.440 [info] > git status -z -uall [14ms]\n2025-06-30 12:04:47.442 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [15ms]\n2025-06-30 12:04:47.507 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [13ms]\n2025-06-30 12:04:47.518 [info] > git cat-file -s 3e3443dda2156a35a1a6687c9d355ac43133ad47 [11ms]\n2025-06-30 12:04:47.530 [info] > git show --textconv :requirements.txt [11ms]\n2025-06-30 12:04:49.187 [info] > git log --oneline --cherry main...main@{upstream} -- [17ms]\n2025-06-30 12:04:50.433 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [28ms]\n2025-06-30 12:04:50.448 [info] > git cat-file -s 3e3443dda2156a35a1a6687c9d355ac43133ad47 [14ms]\n2025-06-30 12:04:50.462 [info] > git show --textconv :requirements.txt [12ms]\n2025-06-30 12:04:53.045 [info] > git pull --tags origin main [3857ms]\n2025-06-30 12:04:53.045 [info] From github.com:p-doom/jafar\n * branch main -> FETCH_HEAD\nerror: Your local changes to the following files would be overwritten by merge:\n\trequirements.txt\nPlease commit your changes or stash them before you merge.\nAborting\n2025-06-30 12:04:53.062 [info] > git config --get commit.template [14ms]\n2025-06-30 12:04:53.064 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [16ms]\n2025-06-30 12:04:53.077 [info] > git status -z -uall [12ms]\n2025-06-30 12:04:53.078 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [12ms]\n2025-06-30 12:04:54.214 [info] > git config --get commit.template [18ms]\n2025-06-30 12:04:54.217 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [19ms]\n2025-06-30 12:04:54.231 [info] > git status -z -uall [13ms]\n2025-06-30 12:04:54.232 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [14ms]\n2025-06-30 12:04:54.308 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [13ms]\n2025-06-30 12:04:54.319 [info] > git cat-file -s 3e3443dda2156a35a1a6687c9d355ac43133ad47 [11ms]\n2025-06-30 12:04:54.330 [info] > git show --textconv :requirements.txt [10ms]\n2025-06-30 12:04:55.496 [info] > git check-ignore -v -z 
--stdin [21ms]\n2025-06-30 12:04:56.056 [info] > git log --format=%H%n%aN%n%aE%n%at%n%ct%n%P%n%D%n%B -z --shortstat --diff-merges=first-parent -n50 --skip=0 --topo-order --decorate=full --stdin [52ms]\n2025-06-30 12:04:57.126 [info] > git show --textconv HEAD:requirements.txt [15ms]\n2025-06-30 12:04:57.126 [info] > git ls-tree -l HEAD -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [15ms]\n2025-06-30 12:04:58.190 [info] > git status [22ms]\n2025-06-30 12:05:02.110 [info] > git checkout -q -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [24ms]\n2025-06-30 12:05:02.126 [info] > git config --get commit.template [14ms]\n2025-06-30 12:05:02.130 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [18ms]\n2025-06-30 12:05:02.146 [info] > git status -z -uall [15ms]\n2025-06-30 12:05:02.148 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [17ms]\n2025-06-30 12:05:03.218 [info] > git config --get commit.template [16ms]\n2025-06-30 12:05:03.221 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [18ms]\n2025-06-30 12:05:03.238 [info] > git status -z -uall [16ms]\n2025-06-30 12:05:03.240 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [18ms]\n2025-06-30 12:05:03.435 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [15ms]\n2025-06-30 12:05:03.435 [info] > git show --textconv :requirements.txt [16ms]\n2025-06-30 12:05:03.448 [info] > git cat-file -s 3e3443dda2156a35a1a6687c9d355ac43133ad47 [13ms]\n2025-06-30 12:05:04.249 [info] > git status [23ms]\n2025-06-30 12:05:05.264 [info] > git log --oneline --cherry main...main@{upstream} -- [21ms]\n2025-06-30 12:05:06.509 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [26ms]\n2025-06-30 12:05:06.526 [info] > git cat-file -s 3e3443dda2156a35a1a6687c9d355ac43133ad47 [16ms]\n2025-06-30 12:05:06.542 [info] > git show --textconv :requirements.txt [14ms]\n2025-06-30 12:05:08.981 [info] > git pull --tags origin main [3715ms]\n2025-06-30 12:05:08.981 [info] From github.com:p-doom/jafar\n * branch main -> FETCH_HEAD\n2025-06-30 12:05:08.992 [info] > git config --get commit.template [10ms]\n2025-06-30 12:05:08.994 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [12ms]\n2025-06-30 12:05:09.005 [info] > git status -z -uall [11ms]\n2025-06-30 12:05:09.007 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [12ms]\n2025-06-30 12:05:09.022 [info] > git config --get commit.template [12ms]\n2025-06-30 12:05:09.023 [info] > git merge-base refs/heads/main refs/remotes/origin/main [14ms]\n2025-06-30 12:05:09.024 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [13ms]\n2025-06-30 12:05:09.028 [info] > git blame --root --incremental a5377700beef39392cb955d5aa209f57348be60e -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt 
[20ms]\n2025-06-30 12:05:09.036 [info] > git diff --name-status -z --diff-filter=ADMR a5377700beef39392cb955d5aa209f57348be60e...refs/remotes/origin/main [12ms]\n2025-06-30 12:05:09.036 [info] > git status -z -uall [12ms]\n2025-06-30 12:05:09.038 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [13ms]\n2025-06-30 12:05:10.254 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/requirements.txt [22ms]\n2025-06-30 12:05:10.269 [info] > git cat-file -s e5ccb3bb2b6678f7bb5d57f845ea477a9d792dcb [15ms]\n2025-06-30 12:05:10.282 [info] > git show --textconv :requirements.txt [12ms]\n2025-06-30 12:05:10.659 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [13ms]\n2025-06-30 12:05:10.661 [info] > git show --textconv :models/tokenizer.py [16ms]\n2025-06-30 12:05:10.671 [info] > git cat-file -s 049e7d1bae78714245c689dc80e9962d0997f2af [11ms]\n2025-06-30 12:05:10.686 [info] > git blame --root --incremental a5377700beef39392cb955d5aa209f57348be60e -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [12ms]\n2025-06-30 12:05:11.488 [info] > git status [28ms]\n2025-06-30 12:05:12.713 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [21ms]\n2025-06-30 12:05:12.728 [info] > git cat-file -s 049e7d1bae78714245c689dc80e9962d0997f2af [15ms]\n2025-06-30 12:05:12.742 [info] > git show --textconv :models/tokenizer.py [13ms]\n2025-06-30 12:05:12.974 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [26ms]\n2025-06-30 12:05:14.058 [info] > git for-each-ref --format %(refname:short)%00%(upstream:short) refs/heads [28ms]\n2025-06-30 12:05:14.060 [info] > git config --get commit.template [17ms]\n2025-06-30 12:05:14.061 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/main refs/remotes/main [17ms]\n2025-06-30 12:05:14.075 [info] > git status -z -uall [13ms]\n2025-06-30 12:05:14.077 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [15ms]\n2025-06-30 12:05:14.080 [info] > git checkout -q --track origin/grain-dataloader [20ms]\n2025-06-30 12:05:14.092 [info] > git config --get commit.template [11ms]\n2025-06-30 12:05:14.094 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/grain-dataloader refs/remotes/grain-dataloader [12ms]\n2025-06-30 12:05:14.106 [info] > git status -z -uall [12ms]\n2025-06-30 12:05:14.107 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [12ms]\n2025-06-30 12:05:14.122 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/grain-dataloader refs/remotes/grain-dataloader [13ms]\n2025-06-30 12:05:14.123 [info] > git blame --root --incremental 1fecb21cda58d5a0fd3af4ecf40c811aba79dbdf -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [14ms]\n2025-06-30 12:05:14.131 [info] > git config --local branch.grain-dataloader.vscode-merge-base [8ms]\n2025-06-30 12:05:14.131 [warning] [Git][config] git config failed: Failed to execute git\n2025-06-30 12:05:14.141 [info] > git reflog grain-dataloader 
--grep-reflog=branch: Created from *. [9ms]\n2025-06-30 12:05:14.150 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/origin/grain-dataloader refs/remotes/origin/grain-dataloader [9ms]\n2025-06-30 12:05:14.158 [info] > git config --local branch.grain-dataloader.vscode-merge-base origin/grain-dataloader [8ms]\n2025-06-30 12:05:14.167 [info] > git config --get commit.template [8ms]\n2025-06-30 12:05:14.168 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/grain-dataloader refs/remotes/grain-dataloader [8ms]\n2025-06-30 12:05:14.178 [info] > git status -z -uall [9ms]\n2025-06-30 12:05:14.179 [info] > git for-each-ref --sort -committerdate --format %(refname) %(objectname) %(*objectname) [10ms]\n2025-06-30 12:05:14.190 [info] > git merge-base refs/heads/grain-dataloader refs/remotes/origin/grain-dataloader [10ms]\n2025-06-30 12:05:14.191 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/grain-dataloader refs/remotes/grain-dataloader [11ms]\n2025-06-30 12:05:14.200 [info] > git config --local branch.grain-dataloader.vscode-merge-base [8ms]\n2025-06-30 12:05:14.201 [info] > git diff --name-status -z --diff-filter=ADMR 1fecb21cda58d5a0fd3af4ecf40c811aba79dbdf...refs/remotes/origin/grain-dataloader [10ms]\n2025-06-30 12:05:14.209 [info] > git for-each-ref --format=%(refname)%00%(upstream:short)%00%(objectname)%00%(upstream:track)%00%(upstream:remotename)%00%(upstream:remoteref) --ignore-case refs/heads/origin/grain-dataloader refs/remotes/origin/grain-dataloader [8ms]\n2025-06-30 12:05:15.437 [info] > git ls-files --stage -- /Users/franzsrambical/Documents/pdoom/jafar/models/tokenizer.py [26ms]\n2025-06-30 12:05:15.451 [info] > git cat-file -s 049e7d1bae78714245c689dc80e9962d0997f2af [13ms]\n2025-06-30 12:05:15.465 [info] > git show --textconv :models/tokenizer.py [12ms]\n",log,tab
|
29 |
+
28,174091,"models/tokenizer.py",0,0,"from typing import Dict, Any, Tuple\n\nimport flax.linen as nn\n\nfrom utils.preprocess import patchify, unpatchify\nfrom utils.nn import STTransformer, VectorQuantizer\n\n\nclass TokenizerVQVAE(nn.Module):\n """"""ST-ViVit VQ-VAE""""""\n\n in_dim: int\n model_dim: int\n latent_dim: int\n num_latents: int\n patch_size: int\n num_blocks: int\n num_heads: int\n dropout: float\n codebook_dropout: float\n\n def setup(self):\n self.encoder = STTransformer(\n self.model_dim,\n self.latent_dim,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n )\n self.vq = VectorQuantizer(\n self.latent_dim,\n self.num_latents,\n self.codebook_dropout,\n )\n self.out_dim = self.in_dim * self.patch_size**2\n self.decoder = STTransformer(\n self.model_dim,\n self.out_dim,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n H, W = batch[""videos""].shape[2:4]\n outputs = self.vq_encode(batch[""videos""], training)\n recon = self.decoder(outputs[""z_q""]) # (B, T, H_down * W_down, C)\n recon = nn.sigmoid(recon)\n outputs[""recon""] = unpatchify(recon, self.patch_size, H, W)\n return outputs\n\n def vq_encode(self, videos: Any, training: bool = True) -> Dict[str, Any]:\n # --- Preprocess + encode ---\n B, T = videos.shape[:2]\n x = patchify(videos, self.patch_size)\n N = x.shape[2]\n x = self.encoder(x) # (B, T, N, E)\n\n # --- Vector quantize ---\n x = x.reshape(B * T * N, self.latent_dim)\n z_q, z, emb, indices = self.vq(x, training)\n z_q = z_q.reshape(B, T, N, self.latent_dim)\n indices = indices.reshape(B, T, N)\n return dict(z_q=z_q, z=z, emb=emb, indices=indices)\n\n def decode(self, indices: Any, video_hw: Tuple[int, int]):\n z = self.vq.codebook[indices]\n recon = self.decoder(z)\n recon = nn.sigmoid(recon)\n return unpatchify(recon, self.patch_size, *video_hw)\n",python,tab
|
30 |
+
29,187010,"requirements.txt",0,0,"dm_pix>=0.4.3\neinops>=0.8.0\nflax>=0.8.5\njax[cuda12]>=0.4.30\noptax>=0.2.3\nprocgen>=0.10.7\ntyro>=0.8.5\nwandb>=0.17.4\ngrain>=0.2.10\npre-commit>=4.2.0\narray-record>=0.7.2",pip-requirements,tab
|
31 |
+
30,188335,"requirements.txt",92,0,"",pip-requirements,selection_command
|
32 |
+
31,188443,"requirements.txt",76,0,"",pip-requirements,selection_command
|
33 |
+
32,188773,"requirements.txt",73,0,"",pip-requirements,selection_command
|
34 |
+
33,188909,"requirements.txt",73,0,"#",pip-requirements,content
|
35 |
+
34,188912,"requirements.txt",74,0,"",pip-requirements,selection_keyboard
|
36 |
+
35,188922,"requirements.txt",74,0," ",pip-requirements,content
|
37 |
+
36,188923,"requirements.txt",75,0,"",pip-requirements,selection_keyboard
|
38 |
+
37,189133,"requirements.txt",74,0,"",pip-requirements,selection_command
|
39 |
+
38,189722,"requirements.txt",61,0,"",pip-requirements,selection_command
|
40 |
+
39,189863,"requirements.txt",41,0,"",pip-requirements,selection_command
|
41 |
+
40,190051,"requirements.txt",43,0,"",pip-requirements,selection_command
|
42 |
+
41,190435,"requirements.txt",43,1,"[",pip-requirements,selection_command
|
43 |
+
42,190496,"requirements.txt",43,7,"[cuda12",pip-requirements,selection_command
|
44 |
+
43,190920,"requirements.txt",43,8,"[cuda12]",pip-requirements,selection_command
|
45 |
+
44,190969,"requirements.txt",43,8,"",pip-requirements,content
|
46 |
+
45,196104,"requirements.txt",43,0,"[cuda12]",pip-requirements,content
|
47 |
+
46,196114,"requirements.txt",43,0,"",pip-requirements,selection_command
|
48 |
+
47,196235,"requirements.txt",73,2,"",pip-requirements,content
|
49 |
+
48,196239,"requirements.txt",73,0,"",pip-requirements,selection_command
|
50 |
+
49,197285,"models/tokenizer.py",0,0,"",python,tab
|
51 |
+
50,48684142,"utils/dataloader.py",0,0,"import functools\nimport jax\n\nimport tensorflow as tf\n\n# reserve GPU memory for JAX only if tensorflow is built with GPU support\ntf.config.experimental.set_visible_devices([], ""GPU"")\n\n\n# --- TensorFlow function for processing: slicing, normalization ---\ndef _tf_process_episode(episode_tensor, seq_len, image_h, image_w, image_c):\n """"""\n Processes a raw episode tensor in TensorFlow.\n Takes a full episode, extracts a random sequence, and normalizes it.\n Args:\n episode_tensor: A TensorFlow tensor representing a full video episode.\n Expected shape: (dynamic_length, image_h, image_w, image_c)\n Expected dtype: e.g., tf.uint8 (raw pixel values)\n seq_len: The desired length of the sub-sequence to extract.\n image_h: The height of each frame.\n image_w: The width of each frame.\n image_c: The number of channels in each frame.\n Returns:\n A TensorFlow tensor representing the processed video sequence.\n Shape: (seq_len, image_h, image_w, image_c)\n Dtype: tf.float32 (normalized pixel values)\n """"""\n current_episode_len = tf.shape(episode_tensor)[0]\n\n max_start_idx = current_episode_len - seq_len\n\n start_idx = tf.random.uniform(\n shape=(), minval=0, maxval=max_start_idx + 1, dtype=tf.int32\n )\n\n seq = episode_tensor[start_idx : start_idx + seq_len]\n\n seq = tf.cast(seq, tf.float32) / 255.0\n\n # Ensure the final shape is statically known for batching.\n # tf.reshape is robust, but tf.ensure_shape or set_shape can also be used if confident.\n processed_sequence = tf.reshape(seq, [seq_len, image_h, image_w, image_c])\n\n return processed_sequence\n\n\ndef _parse_tfrecord_fn(example_proto, image_h, image_w, image_c):\n feature_description = {\n ""height"": tf.io.FixedLenFeature([], tf.int64),\n ""width"": tf.io.FixedLenFeature([], tf.int64),\n ""channels"": tf.io.FixedLenFeature([], tf.int64),\n ""sequence_length"": tf.io.FixedLenFeature([], tf.int64),\n ""raw_video"": tf.io.FixedLenFeature([], tf.string),\n }\n example = tf.io.parse_single_example(example_proto, feature_description)\n\n video_shape = (example[""sequence_length""], image_h, image_w, image_c)\n\n episode_tensor = tf.io.decode_raw(example[""raw_video""], out_type=tf.uint8)\n episode_tensor = tf.reshape(episode_tensor, video_shape)\n\n episode_tensor = tf.ensure_shape(episode_tensor, [None, image_h, image_w, image_c])\n return episode_tensor\n\n\ndef get_dataloader(\n tfrecord_paths: list[str], # List of TFRecord file paths\n seq_len: int,\n global_batch_size: int,\n image_h: int,\n image_w: int,\n image_c: int,\n shuffle_buffer_size: int = 1000,\n num_parallel_calls: int = tf.data.AUTOTUNE,\n seed: int = 42,\n):\n """"""\n Creates a tf.data.Dataset pipeline from TFRecord files.\n """"""\n if not tfrecord_paths:\n raise ValueError(""tfrecord_paths list cannot be empty."")\n\n process_id = jax.process_index()\n num_processes = jax.process_count()\n\n assert (\n global_batch_size % num_processes == 0\n ), ""Global batch size {global_batch_size} \\n must be divisible by the number of JAX processes {num_processes} for proper sharding.""\n per_process_batch_size = global_batch_size // num_processes\n\n dataset = tf.data.TFRecordDataset(\n tfrecord_paths, num_parallel_reads=tf.data.AUTOTUNE\n )\n\n dataset = dataset.shard(num_shards=num_processes, index=process_id)\n\n # (f.srambical) NOTE: For TFRecords, it's often good to have a large shuffle buffer.\n if shuffle_buffer_size > 0:\n dataset = dataset.shuffle(\n buffer_size=shuffle_buffer_size, seed=seed, 
reshuffle_each_iteration=True\n )\n parse_fn = functools.partial(\n _parse_tfrecord_fn, image_h=image_h, image_w=image_w, image_c=image_c\n )\n dataset = dataset.map(parse_fn, num_parallel_calls=num_parallel_calls)\n\n tf_process_fn = functools.partial(\n _tf_process_episode,\n seq_len=seq_len,\n image_h=image_h,\n image_w=image_w,\n image_c=image_c,\n )\n dataset = dataset.map(tf_process_fn, num_parallel_calls=num_parallel_calls)\n\n dataset = dataset.repeat(None)\n dataset = dataset.batch(per_process_batch_size, drop_remainder=True)\n dataset = dataset.prefetch(tf.data.AUTOTUNE)\n\n return dataset.as_numpy_iterator()\n",python,tab
|
52 |
+
51,48686605,"utils/dataloader.py",17,0,"",python,selection_command
|
53 |
+
52,48686851,"utils/dataloader.py",28,0,"",python,selection_command
|
54 |
+
53,48686882,"utils/dataloader.py",29,0,"",python,selection_command
|
55 |
+
54,48686915,"utils/dataloader.py",53,0,"",python,selection_command
|
56 |
+
55,48686949,"utils/dataloader.py",54,0,"",python,selection_command
|
57 |
+
56,48693437,"utils/dataloader.py",1116,0,"",python,selection_command
|
58 |
+
57,48693602,"utils/dataloader.py",1856,0,"",python,selection_command
|
59 |
+
58,48693852,"utils/dataloader.py",2666,0,"",python,selection_command
|
60 |
+
59,48693885,"utils/dataloader.py",3406,0,"",python,selection_command
|
61 |
+
60,48693918,"utils/dataloader.py",4182,0,"",python,selection_command
|
62 |
+
61,48693951,"utils/dataloader.py",4380,0,"",python,selection_command
|
63 |
+
62,48694563,"utils/dataloader.py",0,0,"",python,selection_command
|
64 |
+
63,48746408,"utils/dataloader.py",17,0,"",python,selection_command
|
65 |
+
64,48746657,"utils/dataloader.py",28,0,"",python,selection_command
|
66 |
+
65,48746693,"utils/dataloader.py",29,0,"",python,selection_command
|
67 |
+
66,48746720,"utils/dataloader.py",53,0,"",python,selection_command
|
68 |
+
67,48746754,"utils/dataloader.py",54,0,"",python,selection_command
|
69 |
+
68,48746790,"utils/dataloader.py",128,0,"",python,selection_command
|
70 |
+
69,48747147,"utils/dataloader.py",182,0,"",python,selection_command
|
71 |
+
70,48747280,"utils/dataloader.py",183,0,"",python,selection_command
|
72 |
+
71,48747616,"utils/dataloader.py",0,0,"",python,selection_command
|
73 |
+
72,48749565,"utils/dataloader.py",920,0,"",python,selection_command
|
74 |
+
73,48749816,"utils/dataloader.py",1697,0,"",python,selection_command
|
75 |
+
74,48749850,"utils/dataloader.py",2522,0,"",python,selection_command
|
76 |
+
75,48749876,"utils/dataloader.py",3151,0,"",python,selection_command
|
77 |
+
76,48749909,"utils/dataloader.py",4029,0,"",python,selection_command
|
78 |
+
77,48749945,"utils/dataloader.py",4380,0,"",python,selection_command
|
79 |
+
78,48750603,"utils/dataloader.py",0,0,"",python,selection_command
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-6614bc95-283e-4f09-b0e7-458aba09077f1754428379797-2025_08_05-23.14.08.79/source.csv
ADDED
@@ -0,0 +1,5 @@
1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
2 |
+
1,6,"train_dynamics.py",0,0,"from dataclasses import dataclass, field\nimport os\nfrom typing import cast\n\nimport einops\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax.checkpoint as ocp\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\nimport grain\nimport flax.nnx as nnx\n\nfrom dynamics import Dynamics, restore_components\nfrom utils.dataloader import get_dataloader\nfrom utils.lr_utils import get_lr_schedule\nfrom utils.parameter_utils import count_parameters_by_component\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 200_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = """"\n save_ckpt: bool = False\n restore_ckpt: bool = False\n # Optimization\n batch_size: int = 36\n init_lr: float = 0.0\n max_lr: float = 3e-5\n decay_end: float = 0.0\n wsd_decay_steps: int = (\n 10000 # NOTE: wsd_decay_steps will only be used when using a wsd-schedule\n )\n warmup_steps: int = 5000\n lr_schedule: str = ""wsd"" # supported options: wsd, cos\n # Tokenizer\n tokenizer_dim: int = 512\n tokenizer_ffn_dim: int = 2048\n latent_patch_dim: int = 32\n num_patch_latents: int = 1024\n patch_size: int = 4\n tokenizer_num_blocks: int = 4\n tokenizer_num_heads: int = 8\n tokenizer_checkpoint: str = """"\n # LAM\n lam_dim: int = 512\n lam_ffn_dim: int = 2048\n latent_action_dim: int = 32\n num_latent_actions: int = 6\n lam_patch_size: int = 16\n lam_num_blocks: int = 4\n lam_num_heads: int = 8\n lam_checkpoint: str = """"\n dynamics_type: str = ""maskgit"" # supported options: maskgit, causal\n # Dynamics\n dyna_dim: int = 512\n dyna_ffn_dim: int = 2048\n dyna_num_blocks: int = 6\n dyna_num_heads: int = 8\n dropout: float = 0.0\n mask_limit: float = 0.5\n param_dtype = jnp.float32\n dtype = jnp.bfloat16\n use_flash_attention: bool = True\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_dynamics""\n tags: list[str] = field(default_factory=lambda: [""dynamics""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 25000\n log_checkpoint_keep_period: int = 20000\n log_gradients: bool = False\n wandb_id: str = """"\n\n\nargs = tyro.cli(Args)\n\n\ndef dynamics_loss_fn(\n model: Dynamics, inputs: dict\n) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n """"""Compute masked dynamics loss""""""\n inputs[""videos""] = inputs[""videos""].astype(args.dtype) / 255.0\n model.train()\n outputs = model(inputs, training=True)\n mask = outputs[""mask""]\n outputs[""token_logits""] = outputs[""token_logits""].astype(jnp.float32)\n ce_loss = optax.softmax_cross_entropy_with_integer_labels(\n outputs[""token_logits""], outputs[""video_tokens""]\n )\n ce_loss = (mask * ce_loss).sum() / mask.sum()\n acc = outputs[""token_logits""].argmax(-1) == outputs[""video_tokens""]\n acc = (mask * acc).sum() / mask.sum()\n select_probs = jax.nn.softmax(outputs[""token_logits""])\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = jnp.asarray(pix.psnr(gt, recon)).mean()\n ssim = jnp.asarray(pix.ssim(gt, recon)).mean()\n _, index_counts_lam = jnp.unique_counts(\n jnp.ravel(outputs[""lam_indices""]), size=args.num_latent_actions, fill_value=0\n )\n _, 
index_counts_tokenizer = jnp.unique_counts(\n jnp.ravel(outputs[""video_tokens""]), size=args.num_patch_latents, fill_value=0\n )\n codebook_usage_lam = (index_counts_lam != 0).mean()\n codebook_usage_tokenizer = (index_counts_tokenizer != 0).mean()\n metrics = dict(\n cross_entropy_loss=ce_loss,\n masked_token_accuracy=acc,\n select_logit=outputs[""token_logits""].max(-1).mean(),\n select_p=select_probs.max(-1).mean(),\n entropy=jax.scipy.special.entr(select_probs).sum(-1).mean(),\n psnr=psnr,\n ssim=ssim,\n codebook_usage_lam=codebook_usage_lam,\n codebook_usage_tokenizer=codebook_usage_tokenizer,\n )\n return ce_loss, (outputs[""recon""], metrics)\n\n\[email protected]\ndef train_step(\n model: Dynamics, optimizer: nnx.Optimizer, inputs: dict\n) -> tuple[jax.Array, jax.Array, dict]:\n """"""Update state and compute metrics""""""\n\n def loss_fn(model: Dynamics) -> tuple[jax.Array, tuple[jax.Array, dict]]:\n return dynamics_loss_fn(model, inputs)\n\n (loss, (recon, metrics)), grads = nnx.value_and_grad(loss_fn, has_aux=True)(model)\n optimizer.update(grads)\n if args.log_gradients:\n metrics[""gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""dynamics""]\n )\n return loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n\n # --- Initialize model ---\n rng, _rng = jax.random.split(rng)\n rngs = nnx.Rngs(_rng)\n dynamics = Dynamics(\n # Tokenizer\n in_dim=args.image_channels,\n tokenizer_dim=args.tokenizer_dim,\n tokenizer_ffn_dim=args.tokenizer_ffn_dim,\n latent_patch_dim=args.latent_patch_dim,\n num_patch_latents=args.num_patch_latents,\n patch_size=args.patch_size,\n tokenizer_num_blocks=args.tokenizer_num_blocks,\n tokenizer_num_heads=args.tokenizer_num_heads,\n # LAM\n lam_dim=args.lam_dim,\n lam_ffn_dim=args.lam_ffn_dim,\n latent_action_dim=args.latent_action_dim,\n num_latent_actions=args.num_latent_actions,\n lam_patch_size=args.lam_patch_size,\n lam_num_blocks=args.lam_num_blocks,\n lam_num_heads=args.lam_num_heads,\n lam_co_train=not args.lam_checkpoint,\n # Dynamics\n dynamics_type=args.dynamics_type,\n dyna_dim=args.dyna_dim,\n dyna_ffn_dim=args.dyna_ffn_dim,\n dyna_num_blocks=args.dyna_num_blocks,\n dyna_num_heads=args.dyna_num_heads,\n dropout=args.dropout,\n mask_limit=args.mask_limit,\n param_dtype=args.param_dtype,\n dtype=args.dtype,\n use_flash_attention=args.use_flash_attention,\n rngs=rngs,\n )\n\n _, params, _ = nnx.split(dynamics, nnx.Param, ...)\n param_counts = count_parameters_by_component(params)\n\n if args.log and jax.process_index() == 0:\n wandb_init_kwargs = {\n ""entity"": args.entity,\n ""project"": args.project,\n ""name"": args.name,\n ""tags"": args.tags,\n ""group"": ""debug"",\n ""config"": args,\n }\n\n if args.wandb_id:\n wandb_init_kwargs.update(\n {\n ""id"": args.wandb_id,\n ""resume"": ""allow"",\n }\n )\n wandb.init(**wandb_init_kwargs)\n\n wandb.config.update({""model_param_count"": param_counts})\n\n print(""Parameter counts:"")\n print(param_counts)\n\n # --- Initialize optimizer ---\n lr_schedule = get_lr_schedule(\n args.lr_schedule,\n args.init_lr,\n 
args.max_lr,\n args.decay_end,\n args.num_steps,\n args.warmup_steps,\n args.wsd_decay_steps,\n )\n tx = optax.adamw(\n learning_rate=lr_schedule,\n b1=0.9,\n b2=0.9,\n weight_decay=1e-4,\n mu_dtype=args.dtype,\n )\n optimizer = nnx.Optimizer(dynamics, tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n videos_sharding = NamedSharding(mesh, PartitionSpec(""data"", None, None, None, None))\n\n model_state = nnx.state(optimizer.model)\n model_sharded_state = jax.lax.with_sharding_constraint(\n model_state, replicated_sharding\n )\n nnx.update(optimizer.model, model_sharded_state)\n optimizer_state = nnx.state(optimizer, nnx.optimizer.OptState)\n optimizer_sharded_state = jax.lax.with_sharding_constraint(\n optimizer_state, replicated_sharding\n )\n nnx.update(optimizer, optimizer_sharded_state)\n\n # --- Initialize checkpoint manager ---\n step = 0\n handler_registry = ocp.handlers.DefaultCheckpointHandlerRegistry()\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeSave, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""model_state"", ocp.args.PyTreeRestore, ocp.handlers.PyTreeCheckpointHandler\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointSave,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n handler_registry.add(\n ""dataloader_state"",\n grain.checkpoint.CheckpointRestore,\n cast(ocp.handlers.CheckpointHandler, grain.checkpoint.CheckpointHandler),\n )\n\n checkpoint_options = ocp.CheckpointManagerOptions(\n save_interval_steps=args.log_checkpoint_interval,\n max_to_keep=3,\n keep_period=args.log_checkpoint_keep_period,\n step_format_fixed_length=6,\n cleanup_tmp_directories=True,\n )\n\n checkpoint_manager = ocp.CheckpointManager(\n args.ckpt_dir,\n options=checkpoint_options,\n handler_registry=handler_registry,\n )\n\n # --- Create DataLoaderIterator from dataloader ---\n image_shape = (args.image_height, args.image_width, args.image_channels)\n array_record_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".array_record"")\n ]\n grain_dataloader = get_dataloader(\n array_record_files,\n args.seq_len,\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n args.batch_size,\n *image_shape,\n num_workers=8,\n prefetch_buffer_size=1,\n seed=args.seed,\n )\n initial_state = grain_dataloader._create_initial_state()\n grain_iterator = grain.DataLoaderIterator(grain_dataloader, initial_state)\n\n # --- Restore checkpoint ---\n if args.restore_ckpt:\n abstract_optimizer = nnx.eval_shape(lambda: optimizer)\n abstract_optimizer_state = nnx.state(abstract_optimizer)\n restored = checkpoint_manager.restore(\n checkpoint_manager.latest_step(),\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeRestore(abstract_optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointRestore(grain_iterator), # type: ignore\n ),\n )\n restored_optimizer_state = restored[""model_state""]\n nnx.update(optimizer, restored_optimizer_state)\n grain_iterator = restored[""dataloader_state""]\n step = checkpoint_manager.latest_step() or 0\n print(f""Restored dataloader and model state from step {step}"")\n else:\n # Restore from pre-trained tokenizer (and LAM)\n optimizer = 
restore_components(optimizer, replicated_sharding, rng, args)\n # NOTE: We have to remove the tokenizer vq dropout due to a bug in flax.nnx\n del optimizer.model.tokenizer.vq.drop\n\n # --- TRAIN LOOP ---\n dataloader = (\n jax.make_array_from_process_local_data(videos_sharding, elem)\n for elem in grain_iterator\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng_mask = jax.random.split(rng, 2)\n inputs = dict(videos=videos, mask_rng=_rng_mask)\n loss, recon, metrics = train_step(dynamics, optimizer, inputs)\n metrics[""lr""] = lr_schedule(step)\n print(f""Step {step}, loss: {loss}"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = inputs[""videos""][0].astype(jnp.float32) / 255.0\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[args.seq_len - 1])),\n recon=wandb.Image(np.asarray(recon_seq[args.seq_len - 1])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n # --- Checkpointing ---\n if args.save_ckpt and step % args.log_checkpoint_interval == 0:\n optimizer_state = nnx.state(optimizer)\n checkpoint_manager.save(\n step,\n args=ocp.args.Composite(\n model_state=ocp.args.PyTreeSave(optimizer_state), # type: ignore\n dataloader_state=grain.checkpoint.CheckpointSave( # type: ignore\n grain_iterator # type: ignore\n ),\n ),\n )\n print(f""Saved checkpoint at step {step}"")\n if step >= args.num_steps:\n break\n\n checkpoint_manager.close()\n",python,tab
|
3 |
+
2,696,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"11:14:08 PM [info] Activating crowd-code\n11:14:08 PM [info] Recording started\n11:14:08 PM [info] Initializing git provider using file system watchers...\n",Log,tab
|
4 |
+
3,1284,"extension-output-pdoom-org.crowd-code-#1-crowd-code",153,0,"11:14:08 PM [info] Git repository found\n11:14:08 PM [info] Git provider initialized successfully\n11:14:08 PM [info] Initial git state: [object Object]\n",Log,content
|
5 |
+
4,15156,"train_dynamics.py",0,0,"",python,tab
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-68bb0852-886e-4fc0-9144-b85c1ade71281752827220294-2025_07_18-10.27.35.446/source.csv
ADDED
@@ -0,0 +1,9 @@
1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
2 |
+
1,16,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",0,0,"#!/usr/bin/env bash\n\n#SBATCH --nodes=2\n#SBATCH --ntasks-per-node=4\n#SBATCH --time=24:00:00\n#SBATCH --partition=accelerated\n#SBATCH --cpus-per-task=5\n#SBATCH --gres=gpu:4\n#SBATCH --output=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_franz/big-runs/%x_%j.log\n#SBATCH --error=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/logs/logs_franz/big-runs/%x_%j.log\n#SBATCH --job-name=train_tokenizer_lr_sweep_1e-4_larger_ffn\n\n# Log the sbatch script\ncat $0\n\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\narray_records_dir=/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data_new/open_ai_minecraft_arrayrecords_chunked\n\njob_name=$SLURM_JOB_NAME\nslurm_job_id=$SLURM_JOB_ID\n\nCHECKPOINT_DIR=$PWD/checkpoints/$job_name\nmkdir -p $CHECKPOINT_DIR\n\nenv | grep SLURM\n\nsrun python train_tokenizer.py \\n --ckpt_dir $CHECKPOINT_DIR \\n --restore_ckpt \\n --save_ckpt \\n --batch_size=96 \\n --ffn_dim=2048 \\n --num_blocks=4 \\n --init_lr=0 \\n --max_lr=1e-4 \\n --log_image_interval=1000 \\n --log_checkpoint_interval=1000 \\n --log \\n --name=mixed-precision-tokenizer-lr-1e-4-larger-ffn \\n --tags tokenizer mixed-precision 1e-4 mixed-precision flash-attention larger-ffn \\n --entity instant-uv \\n --project jafar \\n --data_dir $array_records_dir",shellscript,tab
|
3 |
+
2,853,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"10:27:35 AM [info] Activating crowd-code\n10:27:35 AM [info] Recording started\n10:27:35 AM [info] Initializing git provider using file system watchers...\n",Log,tab
|
4 |
+
3,1195,"extension-output-pdoom-org.crowd-code-#1-crowd-code",153,0,"10:27:35 AM [info] Git repository found\n10:27:35 AM [info] Git provider initialized successfully\n10:27:35 AM [info] Initial git state: [object Object]\n",Log,content
|
5 |
+
4,1685,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",0,0,"",shellscript,tab
|
6 |
+
5,1658732,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",0,0,"Switched from branch 'distinct-ffn-dim' to 'main'",shellscript,git_branch_checkout
|
7 |
+
6,1728732,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",0,0,"Switched from branch 'main' to 'revert-partial-remat'",shellscript,git_branch_checkout
|
8 |
+
7,6029114,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",0,0,"Switched from branch 'revert-partial-remat' to 'main'",shellscript,git_branch_checkout
|
9 |
+
8,6069104,"experiments/train_tokenizer_lr_1e-4_larger_ffn.sh",0,0,"Switched from branch 'main' to 'distinct-ffn-dim'",shellscript,git_branch_checkout
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-73fae8d1-0502-4ab4-bd59-8818555f36691753200603120-2025_07_22-18.10.30.380/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-76073275-4388-463f-8e12-ce34ee46fad51752495312029-2025_07_14-14.15.14.704/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-7879e034-f897-48e8-8481-1a87a73b0dc81752135543307-2025_07_10-10.19.09.565/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-79bdcdff-1889-496c-9bc9-830e887f70d81751447790479-2025_07_02-11.39.26.104/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-81dc70dc-8e01-48a6-9a00-9349b9f9a4171751541780271-2025_07_03-13.23.33.804/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-84bc9952-c4b0-4456-bdc2-984faf53684f1751163593750-2025_06_28-19.19.55.196/source.csv
ADDED
@@ -0,0 +1,10 @@
1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
2 |
+
1,3,"tasks",0,0,"",Log,tab
|
3 |
+
2,18,"utils/dataloader.py",0,0,"import functools\nimport jax\n\nimport tensorflow as tf\n\n# reserve GPU memory for JAX only if tensorflow is built with GPU support\ntf.config.experimental.set_visible_devices([], ""GPU"")\n\n\n# --- TensorFlow function for processing: slicing, normalization ---\ndef _tf_process_episode(episode_tensor, seq_len, image_h, image_w, image_c):\n """"""\n Processes a raw episode tensor in TensorFlow.\n Takes a full episode, extracts a random sequence, and normalizes it.\n Args:\n episode_tensor: A TensorFlow tensor representing a full video episode.\n Expected shape: (dynamic_length, image_h, image_w, image_c)\n Expected dtype: e.g., tf.uint8 (raw pixel values)\n seq_len: The desired length of the sub-sequence to extract.\n image_h: The height of each frame.\n image_w: The width of each frame.\n image_c: The number of channels in each frame.\n Returns:\n A TensorFlow tensor representing the processed video sequence.\n Shape: (seq_len, image_h, image_w, image_c)\n Dtype: tf.float32 (normalized pixel values)\n """"""\n current_episode_len = tf.shape(episode_tensor)[0]\n\n max_start_idx = current_episode_len - seq_len\n\n start_idx = tf.random.uniform(\n shape=(), minval=0, maxval=max_start_idx + 1, dtype=tf.int32\n )\n\n seq = episode_tensor[start_idx : start_idx + seq_len]\n\n seq = tf.cast(seq, tf.float32) / 255.0\n\n # Ensure the final shape is statically known for batching.\n # tf.reshape is robust, but tf.ensure_shape or set_shape can also be used if confident.\n processed_sequence = tf.reshape(seq, [seq_len, image_h, image_w, image_c])\n\n return processed_sequence\n\n\ndef _parse_tfrecord_fn(example_proto, image_h, image_w, image_c):\n feature_description = {\n ""height"": tf.io.FixedLenFeature([], tf.int64),\n ""width"": tf.io.FixedLenFeature([], tf.int64),\n ""channels"": tf.io.FixedLenFeature([], tf.int64),\n ""sequence_length"": tf.io.FixedLenFeature([], tf.int64),\n ""raw_video"": tf.io.FixedLenFeature([], tf.string),\n }\n example = tf.io.parse_single_example(example_proto, feature_description)\n\n video_shape = (example[""sequence_length""], image_h, image_w, image_c)\n\n episode_tensor = tf.io.decode_raw(example[""raw_video""], out_type=tf.uint8)\n episode_tensor = tf.reshape(episode_tensor, video_shape)\n\n episode_tensor = tf.ensure_shape(episode_tensor, [None, image_h, image_w, image_c])\n return episode_tensor\n\n\ndef get_dataloader(\n tfrecord_paths: list[str], # List of TFRecord file paths\n seq_len: int,\n global_batch_size: int,\n image_h: int,\n image_w: int,\n image_c: int,\n shuffle_buffer_size: int = 1000,\n num_parallel_calls: int = tf.data.AUTOTUNE,\n seed: int = 42,\n):\n """"""\n Creates a tf.data.Dataset pipeline from TFRecord files.\n """"""\n if not tfrecord_paths:\n raise ValueError(""tfrecord_paths list cannot be empty."")\n\n process_id = jax.process_index()\n num_processes = jax.process_count()\n\n assert (\n global_batch_size % num_processes == 0\n ), ""Global batch size {global_batch_size} \\n must be divisible by the number of JAX processes {num_processes} for proper sharding.""\n per_process_batch_size = global_batch_size // num_processes\n\n dataset = tf.data.TFRecordDataset(\n tfrecord_paths, num_parallel_reads=tf.data.AUTOTUNE\n )\n\n dataset = dataset.shard(num_shards=num_processes, index=process_id)\n\n # (f.srambical) NOTE: For TFRecords, it's often good to have a large shuffle buffer.\n if shuffle_buffer_size > 0:\n dataset = dataset.shuffle(\n buffer_size=shuffle_buffer_size, seed=seed, 
reshuffle_each_iteration=True\n )\n parse_fn = functools.partial(\n _parse_tfrecord_fn, image_h=image_h, image_w=image_w, image_c=image_c\n )\n dataset = dataset.map(parse_fn, num_parallel_calls=num_parallel_calls)\n\n tf_process_fn = functools.partial(\n _tf_process_episode,\n seq_len=seq_len,\n image_h=image_h,\n image_w=image_w,\n image_c=image_c,\n )\n dataset = dataset.map(tf_process_fn, num_parallel_calls=num_parallel_calls)\n\n dataset = dataset.repeat(None)\n dataset = dataset.batch(per_process_batch_size, drop_remainder=True)\n dataset = dataset.prefetch(tf.data.AUTOTUNE)\n\n return dataset.as_numpy_iterator()\n",python,tab
|
4 |
+
3,40,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
5 |
+
4,41,"utils/dataloader.py",0,0,"",python,tab
|
6 |
+
5,1469,"utils/dataloader.py",0,0,"",python,selection_command
|
7 |
+
6,3873,"train_tokenizer.py",0,0,"from dataclasses import dataclass, field\nimport os\nimport time\n\nimport einops\nfrom flax.training import orbax_utils\nfrom flax.training.train_state import TrainState\nfrom jax.sharding import Mesh, PartitionSpec, NamedSharding\nfrom jax.experimental.mesh_utils import create_device_mesh\nimport optax\nimport orbax\nfrom orbax.checkpoint import PyTreeCheckpointer\nimport numpy as np\nimport dm_pix as pix\nimport jax\nimport jax.numpy as jnp\nimport tyro\nimport wandb\n\nfrom models.tokenizer import TokenizerVQVAE\nfrom utils.dataloader import get_dataloader\n\nts = int(time.time())\n\n\n@dataclass\nclass Args:\n # Experiment\n num_steps: int = 300_000\n seed: int = 0\n seq_len: int = 16\n image_channels: int = 3\n image_height: int = 90\n image_width: int = 160\n data_dir: str = ""data_tfrecords/coinrun""\n checkpoint: str = """"\n # Optimization\n vq_beta: float = 0.25\n batch_size: int = 48\n min_lr: float = 3e-4\n max_lr: float = 3e-4\n warmup_steps: int = 10000\n # Tokenizer\n model_dim: int = 512\n latent_dim: int = 32\n num_latents: int = 1024\n patch_size: int = 4\n num_blocks: int = 8\n num_heads: int = 8\n dropout: float = 0.0\n codebook_dropout: float = 0.01\n # Logging\n log: bool = False\n entity: str = """"\n project: str = """"\n name: str = ""train_tokenizer""\n tags: list[str] = field(default_factory=lambda: [""tokenizer""])\n log_interval: int = 5\n log_image_interval: int = 250\n ckpt_dir: str = """"\n log_checkpoint_interval: int = 10000\n log_gradients: bool = False\n\n\nargs = tyro.cli(Args)\n\n\ndef tokenizer_loss_fn(params, state, inputs):\n # --- Compute loss ---\n outputs = state.apply_fn(\n params,\n inputs,\n training=True,\n rngs={""params"": inputs[""rng""], ""dropout"": inputs[""dropout_rng""]},\n )\n mse = jnp.square(inputs[""videos""] - outputs[""recon""]).mean()\n q_loss = jnp.square(jax.lax.stop_gradient(outputs[""emb""]) - outputs[""z""]).mean()\n commitment_loss = jnp.square(\n outputs[""emb""] - jax.lax.stop_gradient(outputs[""z""])\n ).mean()\n loss = mse + q_loss + args.vq_beta * commitment_loss\n\n # --- Compute validation metrics ---\n gt = inputs[""videos""].clip(0, 1).reshape(-1, *inputs[""videos""].shape[2:])\n recon = outputs[""recon""].clip(0, 1).reshape(-1, *outputs[""recon""].shape[2:])\n psnr = pix.psnr(gt, recon).mean()\n ssim = pix.ssim(gt, recon).mean()\n _, index_counts = jnp.unique_counts(\n jnp.ravel(outputs[""indices""]), size=args.num_latents, fill_value=0\n )\n codebook_usage = (index_counts != 0).mean()\n metrics = dict(\n loss=loss,\n mse=mse,\n q_loss=q_loss,\n commitment_loss=commitment_loss,\n psnr=psnr,\n ssim=ssim,\n codebook_usage=codebook_usage,\n )\n return loss, (outputs[""recon""], metrics)\n\n\[email protected]\ndef train_step(state, inputs):\n grad_fn = jax.value_and_grad(tokenizer_loss_fn, has_aux=True, allow_int=True)\n (loss, (recon, metrics)), grads = grad_fn(state.params, state, inputs)\n state = state.apply_gradients(grads=grads)\n if args.log_gradients:\n metrics[""encoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""encoder""]\n )\n metrics[""vq_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""vq""]\n )\n metrics[""decoder_gradients_std/""] = jax.tree.map(\n lambda x: x.std(), grads[""params""][""decoder""]\n )\n return state, loss, recon, metrics\n\n\nif __name__ == ""__main__"":\n jax.distributed.initialize()\n num_devices = jax.device_count()\n if num_devices == 0:\n raise ValueError(""No JAX devices found."")\n 
print(f""Running on {num_devices} devices."")\n\n if args.batch_size % num_devices != 0:\n raise ValueError(\n f""Global batch size {args.batch_size} must be divisible by ""\n f""number of devices {num_devices}.""\n )\n\n per_device_batch_size_for_init = args.batch_size // num_devices\n\n rng = jax.random.PRNGKey(args.seed)\n if args.log and jax.process_index() == 0:\n wandb.init(\n entity=args.entity,\n project=args.project,\n name=args.name,\n tags=args.tags,\n group=""debug"",\n config=args\n )\n\n # --- Initialize model ---\n tokenizer = TokenizerVQVAE(\n in_dim=args.image_channels,\n model_dim=args.model_dim,\n latent_dim=args.latent_dim,\n num_latents=args.num_latents,\n patch_size=args.patch_size,\n num_blocks=args.num_blocks,\n num_heads=args.num_heads,\n dropout=args.dropout,\n codebook_dropout=args.codebook_dropout,\n )\n rng, _rng = jax.random.split(rng)\n image_shape = (args.image_height, args.image_width, args.image_channels)\n inputs = dict(\n videos=jnp.zeros(\n (per_device_batch_size_for_init, args.seq_len, *image_shape),\n dtype=jnp.float32,\n ),\n )\n init_params = tokenizer.init(_rng, inputs)\n\n # --- Initialize optimizer ---\n lr_schedule = optax.warmup_cosine_decay_schedule(\n args.min_lr, args.max_lr, args.warmup_steps, args.num_steps\n )\n tx = optax.adamw(learning_rate=lr_schedule, b1=0.9, b2=0.9, weight_decay=1e-4)\n train_state = TrainState.create(apply_fn=tokenizer.apply, params=init_params, tx=tx)\n\n # FIXME: switch to create_hybrid_device_mesh for runs spanning multiple nodes\n device_mesh_arr = create_device_mesh((num_devices,))\n mesh = Mesh(devices=device_mesh_arr, axis_names=(""data"",))\n\n replicated_sharding = NamedSharding(mesh, PartitionSpec())\n train_state = jax.device_put(train_state, replicated_sharding)\n\n # --- Load checkpoint ---\n step = 0\n if args.checkpoint:\n restore_target = {""model"": train_state}\n restore_args = orbax_utils.restore_args_from_target(restore_target)\n train_state.params[""params""].update(\n PyTreeCheckpointer()\n .restore(args.checkpoint, item=restore_target, restore_args=restore_args)[\n ""model""\n ]\n .params[""params""]\n )\n # Assume checkpoint is of the form tokenizer_<timestamp>_<step>\n step += int(args.checkpoint.split(""_"")[-1])\n\n # --- TRAIN LOOP ---\n tfrecord_files = [\n os.path.join(args.data_dir, x)\n for x in os.listdir(args.data_dir)\n if x.endswith("".tfrecord"")\n ]\n dataloader = get_dataloader(\n # NOTE: We deliberately pass the global batch size\n # The dataloader shards the dataset across all processes\n tfrecord_files,\n args.seq_len,\n args.batch_size,\n *image_shape,\n )\n print(f""Starting training from step {step}..."")\n while step < args.num_steps:\n for videos in dataloader:\n # --- Train step ---\n rng, _rng, _rng_dropout = jax.random.split(rng, 3)\n\n videos_sharding = NamedSharding(\n mesh, PartitionSpec(""data"", None, None, None, None)\n )\n videos = jax.make_array_from_process_local_data(videos_sharding, videos)\n\n inputs = dict(videos=videos, rng=_rng, dropout_rng=_rng_dropout)\n start_time = time.time()\n train_state, loss, recon, metrics = train_step(train_state, inputs)\n elapsed_time = (time.time() - start_time) * 1000\n print(f""Step {step}, loss: {loss}, step time: {elapsed_time}ms"")\n step += 1\n\n # --- Logging ---\n if args.log:\n if step % args.log_interval == 0 and jax.process_index() == 0:\n wandb.log(\n {\n ""loss"": loss,\n ""step"": step,\n ""step_time_ms"": elapsed_time,\n **metrics,\n }\n )\n if step % args.log_image_interval == 0:\n gt_seq = 
inputs[""videos""][0]\n recon_seq = recon[0].clip(0, 1)\n comparison_seq = jnp.concatenate((gt_seq, recon_seq), axis=1)\n comparison_seq = einops.rearrange(\n comparison_seq * 255, ""t h w c -> h (t w) c""\n )\n # NOTE: Process-dependent control flow deliberately happens\n # after indexing operation since it must not contain code\n # sections that lead to cross-accelerator communication.\n if jax.process_index() == 0:\n log_images = dict(\n image=wandb.Image(np.asarray(gt_seq[0])),\n recon=wandb.Image(np.asarray(recon_seq[0])),\n true_vs_recon=wandb.Image(\n np.asarray(comparison_seq.astype(np.uint8))\n ),\n )\n wandb.log(log_images)\n if step % args.log_checkpoint_interval == 0:\n ckpt = {""model"": train_state}\n orbax_checkpointer = orbax.checkpoint.PyTreeCheckpointer()\n save_args = orbax_utils.save_args_from_target(ckpt)\n orbax_checkpointer.save(\n os.path.join(os.getcwd(), args.ckpt_dir, f""tokenizer_{ts}_{step}""),\n ckpt,\n save_args=save_args,\n )\n if step >= args.num_steps:\n break\n",python,tab
|
8 |
+
7,5631,"models/tokenizer.py",0,0,"from typing import Dict, Any, Tuple\n\nimport flax.linen as nn\n\nfrom utils.preprocess import patchify, unpatchify\nfrom utils.nn import STTransformer, VectorQuantizer\n\n\nclass TokenizerVQVAE(nn.Module):\n """"""ST-ViVit VQ-VAE""""""\n\n in_dim: int\n model_dim: int\n latent_dim: int\n num_latents: int\n patch_size: int\n num_blocks: int\n num_heads: int\n dropout: float\n codebook_dropout: float\n\n def setup(self):\n self.encoder = STTransformer(\n self.model_dim,\n self.latent_dim,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n )\n self.vq = VectorQuantizer(\n self.latent_dim,\n self.num_latents,\n self.codebook_dropout,\n )\n self.out_dim = self.in_dim * self.patch_size**2\n self.decoder = STTransformer(\n self.model_dim,\n self.out_dim,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n H, W = batch[""videos""].shape[2:4]\n outputs = self.vq_encode(batch[""videos""], training)\n recon = self.decoder(outputs[""z_q""]) # (B, T, H_down * W_down, C)\n recon = nn.sigmoid(recon)\n outputs[""recon""] = unpatchify(recon, self.patch_size, H, W)\n return outputs\n\n def vq_encode(self, videos: Any, training: bool = True) -> Dict[str, Any]:\n # --- Preprocess + encode ---\n B, T = videos.shape[:2]\n x = patchify(videos, self.patch_size)\n N = x.shape[2]\n x = self.encoder(x) # (B, T, N, E)\n\n # --- Vector quantize ---\n x = x.reshape(B * T * N, self.latent_dim)\n z_q, z, emb, indices = self.vq(x, training)\n z_q = z_q.reshape(B, T, N, self.latent_dim)\n indices = indices.reshape(B, T, N)\n return dict(z_q=z_q, z=z, emb=emb, indices=indices)\n\n def decode(self, indices: Any, video_hw: Tuple[int, int]):\n z = self.vq.codebook[indices]\n recon = self.decoder(z)\n recon = nn.sigmoid(recon)\n return unpatchify(recon, self.patch_size, *video_hw)\n",python,tab
|
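A shape-flow sketch for the TokenizerVQVAE above; the `patchify` helper shown here is a hypothetical einops-based stand-in, since utils.preprocess itself is not part of this recording:

import einops
import jax.numpy as jnp

def patchify(videos, patch_size):
    # (B, T, H, W, C) -> (B, T, N, patch_size**2 * C) with N = (H // p) * (W // p)
    return einops.rearrange(
        videos, "b t (hn p1) (wn p2) c -> b t (hn wn) (p1 p2 c)",
        p1=patch_size, p2=patch_size,
    )

videos = jnp.zeros((1, 16, 64, 64, 3))    # (B, T, H, W, C)
patches = patchify(videos, patch_size=4)  # (1, 16, 256, 48): 256 patches of 4*4*3 values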
9 |
+
8,6632,"models/tokenizer.py",1009,0,"",python,selection_command
|
10 |
+
9,59165787,"models/tokenizer.py",0,2173,"from typing import Dict, Any, Tuple\n\nimport flax.linen as nn\n\nfrom utils.preprocess import patchify, unpatchify\nfrom utils.nn import STTransformer, VectorQuantizer\n\n\nclass TokenizerVQVAE(nn.Module):\n """"""ST-ViVit VQ-VAE""""""\n\n in_dim: int\n model_dim: int\n latent_dim: int\n num_latents: int\n patch_size: int\n num_blocks: int\n num_heads: int\n dropout: float\n codebook_dropout: float\n\n def setup(self):\n self.encoder = STTransformer(\n self.model_dim,\n self.latent_dim,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n )\n self.vq = VectorQuantizer(\n self.latent_dim,\n self.num_latents,\n self.codebook_dropout,\n )\n self.out_dim = self.in_dim * self.patch_size**2\n self.decoder = STTransformer(\n self.model_dim,\n self.out_dim,\n self.num_blocks,\n self.num_heads,\n self.dropout,\n )\n\n def __call__(self, batch: Dict[str, Any], training: bool = True) -> Dict[str, Any]:\n H, W = batch[""videos""].shape[2:4]\n outputs = self.vq_encode(batch[""videos""], training)\n recon = self.decoder(outputs[""z_q""]) # (B, T, H_down * W_down, C)\n recon = nn.sigmoid(recon)\n outputs[""recon""] = unpatchify(recon, self.patch_size, H, W)\n return outputs\n\n def vq_encode(self, videos: Any, training: bool = True) -> Dict[str, Any]:\n # --- Preprocess + encode ---\n B, T = videos.shape[:2]\n x = patchify(videos, self.patch_size)\n N = x.shape[2]\n x = self.encoder(x) # (B, T, N, E)\n\n # --- Vector quantize ---\n x = x.reshape(B * T * N, self.latent_dim)\n z_q, z, emb, indices = self.vq(x, training)\n z_q = z_q.reshape(B, T, N, self.latent_dim)\n indices = indices.reshape(B, T, N)\n return dict(z_q=z_q, z=z, emb=emb, indices=indices)\n\n def decode(self, indices: Any, video_hw: Tuple[int, int]):\n z = self.vq.codebook[indices]\n recon = self.decoder(z)\n recon = nn.sigmoid(recon)\n return unpatchify(recon, self.patch_size, *video_hw)\n",python,selection_command
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-88e0ea71-dd7c-4ca4-b50e-d557352226511751452791721-2025_07_02-12.40.17.73/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-8a6a12ae-3e93-40d9-8a49-8ed320340ef81751310132462-2025_06_30-21.03.13.314/source.csv
ADDED
@@ -0,0 +1,443 @@
|
1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
2 |
+
1,6,"utils/dataloader.py",0,0,"import jax\nimport numpy as np\nimport grain\nfrom typing import Any, Optional\nfrom array_record.python import array_record_module\nimport tensorflow as tf\nimport os\nfrom pathlib import Path\n\n\ndef convert_tfrecords_to_arrayrecords(\n tfrecord_folder: str,\n output_folder: str,\n records_per_file: int = 1000,\n feature_description: Optional[dict] = None,\n):\n """"""\n Converts TFRecord files to ArrayRecord format for use with Grain.\n \n Args:\n tfrecord_folder: Path to folder containing TFRecord files\n output_folder: Path to output folder for ArrayRecord files\n records_per_file: Number of records to write per ArrayRecord file\n feature_description: Dictionary describing TFRecord features. If None,\n uses default description for video data.\n \n Returns:\n List of paths to created ArrayRecord files\n """"""\n if feature_description is None:\n feature_description = {\n 'raw_video': tf.io.FixedLenFeature([], tf.string),\n 'sequence_length': tf.io.FixedLenFeature([], tf.int64),\n }\n \n os.makedirs(output_folder, exist_ok=True)\n \n tfrecord_files = list(Path(tfrecord_folder).glob(""*.tfrecord""))\n if not tfrecord_files:\n raise ValueError(f""No TFRecord files found in {tfrecord_folder}"")\n \n print(f""Found {len(tfrecord_files)} TFRecord files"")\n \n dataset = tf.data.TFRecordDataset([str(f) for f in tfrecord_files])\n \n def parse_tfrecord(example_proto):\n """"""Parse a single TFRecord example.""""""\n parsed_features = tf.io.parse_single_example(example_proto, feature_description)\n return {\n 'raw_video': parsed_features['raw_video'].numpy(),\n 'sequence_length': int(parsed_features['sequence_length'].numpy())\n }\n \n arrayrecord_files = []\n record_count = 0\n file_index = 0\n \n current_writer = None\n \n for record in dataset:\n parsed_record = parse_tfrecord(record)\n \n if current_writer is None:\n output_file = os.path.join(output_folder, f""data_{file_index:04d}.arrayrecord"")\n current_writer = grain.io.ArrayRecordWriter(output_file)\n arrayrecord_files.append(output_file)\n \n current_writer.write(parsed_record)\n record_count += 1\n \n if record_count % records_per_file == 0:\n current_writer.close()\n current_writer = None\n file_index += 1\n print(f""Created ArrayRecord file {file_index}: {record_count} records processed"")\n \n if current_writer is not None:\n current_writer.close()\n print(f""Created final ArrayRecord file: {record_count} total records processed"")\n \n print(f""Conversion complete! Created {len(arrayrecord_files)} ArrayRecord files"")\n return arrayrecord_files\n\n\nclass ProcessEpisodeAndSlice(grain.transforms.RandomMap):\n """"""\n A Grain Transformation that combines parsing, slicing, and normalizing.\n\n This class replaces the functionality of both `_parse_tfrecord_fn` and\n `_tf_process_episode` from the original tf.data pipeline. 
It operates on\n a single data record (episode).\n """"""\n\n def __init__(self, seq_len: int, image_h: int, image_w: int, image_c: int):\n """"""Initializes the transformation with processing parameters.""""""\n self.seq_len = seq_len\n self.image_h = image_h\n self.image_w = image_w\n self.image_c = image_c\n\n def random_map(self, element: dict, rng: np.random.Generator) -> Any:\n """"""\n Processes a single raw episode from the data source.\n\n Args:\n element: A dictionary representing one record from the DataSource.\n Expected to contain 'raw_video' (bytes), and\n 'sequence_length' (int).\n rng: A per-record random number generator provided by the Grain sampler.\n\n Returns:\n A processed video sequence as a NumPy array with shape\n (seq_len, image_h, image_w, image_c) and dtype float32.\n """"""\n # Step 1: Parse and reshape the raw video data (replaces _parse_tfrecord_fn)\n # We assume the element from the data source is a dict.\n video_shape = (\n element[""sequence_length""],\n self.image_h,\n self.image_w,\n self.image_c,\n )\n episode_tensor = np.frombuffer(element[""raw_video""], dtype=np.uint8)\n episode_tensor = episode_tensor.reshape(video_shape)\n\n # Step 2: Randomly slice and normalize (replaces _tf_process_episode)\n current_episode_len = episode_tensor.shape[0]\n if current_episode_len < self.seq_len:\n raise ValueError(f""An episode has length {current_episode_len}, which is ""\n f""shorter than the requested sequence length {self.seq_len}."")\n \n max_start_idx = current_episode_len - self.seq_len\n \n # Use the provided Grain RNG for deterministic randomness.\n start_idx = rng.integers(0, max_start_idx + 1)\n\n seq = episode_tensor[start_idx : start_idx + self.seq_len]\n\n # Normalize to [0, 1]\n processed_sequence = seq.astype(np.float32) / 255.0\n\n return processed_sequence\n\n\ndef get_dataloader_grain(\n array_record_paths: list[str], # List of ArrayRecord file paths\n seq_len: int,\n global_batch_size: int,\n image_h: int,\n image_w: int,\n image_c: int,\n # shuffle_buffer_size is not needed; Grain shuffles all indices globally.\n num_workers: int = 4, # Replaces num_parallel_calls\n seed: int = 42,\n):\n """"""\n Creates a data loading pipeline using Google Grain.\n\n This function sets up a data source, a sampler for sharding/shuffling,\n and a sequence of operations (transformations) to be applied to the data.\n """"""\n if not array_record_paths:\n raise ValueError(""array_record_paths list cannot be empty."")\n\n process_id = jax.process_index()\n num_processes = jax.process_count()\n\n if global_batch_size % num_processes != 0:\n raise ValueError(\n f""Global batch size {global_batch_size} must be divisible by ""\n f""the number of JAX processes {num_processes} for proper sharding.""\n )\n per_process_batch_size = global_batch_size // num_processes\n\n # 1. DataSource: Reads records from ArrayRecord files.\n # This replaces `tf.data.TFRecordDataset`.\n source = grain.sources.ArrayRecordDataSource(array_record_paths)\n \n # 2. 
Sampler: Defines the order of access, sharding, shuffling, and epochs.\n # This replaces `dataset.shard`, `dataset.shuffle`, and `dataset.repeat`.\n shard_options = grain.sharding.ShardOptions(\n shard_index=process_id, shard_count=num_processes, drop_remainder=True\n )\n\n sampler = grain.samplers.IndexSampler(\n num_records=len(source),\n shard_options=shard_options,\n seed=seed,\n num_epochs=None, # `None` means repeat indefinitely, like `tf.data.repeat(None)`.\n shuffle=True, # Enables global shuffling of all record indices.\n )\n\n # 3. Operations: A list of transformations to apply to each record.\n # This replaces the sequence of `.map()` calls.\n operations = [\n ProcessEpisodeAndSlice(\n seq_len=seq_len, image_h=image_h, image_w=image_w, image_c=image_c\n ),\n grain.transforms.Batch(batch_size=per_process_batch_size, drop_remainder=True),\n ]\n\n # 4. DataLoader: The main entry point that orchestrates the pipeline.\n # It manages worker processes for parallel data loading and transformation,\n # replacing `num_parallel_calls` and `.prefetch()`.\n dataloader = grain.DataLoader(\n data_source=source,\n sampler=sampler,\n operations=operations,\n worker_count=num_workers,\n )\n\n # 5. Return an iterator, which is the standard way to consume a Grain DataLoader.\n return iter(dataloader)\n\n",python,tab
|
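A hypothetical usage sketch for get_dataloader_grain as defined above (the glob path, sequence length, image shape, and batch size are placeholders):

import glob

array_record_paths = sorted(glob.glob("data/knoms_arrayrecords/*.arrayrecord"))
loader = get_dataloader_grain(
    array_record_paths,
    seq_len=16,
    global_batch_size=768,
    image_h=64,
    image_w=64,
    image_c=3,
    num_workers=4,
    seed=42,
)
batch = next(loader)  # (per_process_batch_size, 16, 64, 64, 3), float32 in [0, 1]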
3 |
+
2,853,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"9:03:13 PM [info] Activating crowd-code\n9:03:13 PM [info] Recording started\n9:03:13 PM [info] Initializing git provider using file system watchers...\n9:03:13 PM [info] Git repository found\n9:03:13 PM [info] Git provider initialized successfully\n9:03:13 PM [info] Initial git state: [object Object]\n",Log,tab
|
4 |
+
3,1956,"utils/dataloader.py",0,0,"",python,tab
|
5 |
+
4,9249,"requirements.txt",0,0,"dm_pix>=0.4.3\neinops>=0.8.0\nflax>=0.8.5\njax[cuda12]>=0.4.30\noptax>=0.2.3\nprocgen>=0.10.7\ntyro>=0.8.5\nwandb>=0.17.4\ngrain>=0.2.10\npre-commit>=4.2.0\narray-record>=0.7.2",pip-requirements,tab
|
6 |
+
5,9285,"requirements.txt",115,0,"",pip-requirements,selection_command
|
7 |
+
6,15295,"requirements.txt",166,0,"",pip-requirements,selection_mouse
|
8 |
+
7,15306,"requirements.txt",165,0,"",pip-requirements,selection_command
|
9 |
+
8,15308,"requirements.txt",165,1,"2",pip-requirements,selection_mouse
|
10 |
+
9,15327,"requirements.txt",166,0,"",pip-requirements,selection_command
|
11 |
+
10,16030,"requirements.txt",165,0,"",pip-requirements,selection_command
|
12 |
+
11,19453,"utils/dataloader.py",0,0,"",python,tab
|
13 |
+
12,30378,"requirements.txt",0,0,"",pip-requirements,tab
|
14 |
+
13,30380,"requirements.txt",115,0,"",pip-requirements,selection_command
|
15 |
+
14,157589,"utils/dataloader.py",0,0,"",python,tab
|
16 |
+
15,158851,"utils/dataloader.py",150,0,"",python,selection_command
|
17 |
+
16,158857,"utils/dataloader.py",108,0,"",python,selection_command
|
18 |
+
17,278411,".venv/lib/python3.10/site-packages/array_record/python/array_record_module_test.py",0,0,"# Copyright 2022 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the ""License"");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an ""AS IS"" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n""""""Tests for array_record_module.""""""\n\nimport os\n\nfrom absl.testing import absltest\n\nfrom array_record.python.array_record_module import ArrayRecordReader\nfrom array_record.python.array_record_module import ArrayRecordWriter\n\n\nclass ArrayRecordModuleTest(absltest.TestCase):\n\n def setUp(self):\n super(ArrayRecordModuleTest, self).setUp()\n self.test_file = os.path.join(self.create_tempdir().full_path,\n ""test.arecord"")\n\n def test_open_and_close(self):\n writer = ArrayRecordWriter(self.test_file)\n self.assertTrue(writer.ok())\n self.assertTrue(writer.is_open())\n writer.close()\n self.assertFalse(writer.is_open())\n\n reader = ArrayRecordReader(self.test_file)\n self.assertTrue(reader.ok())\n self.assertTrue(reader.is_open())\n reader.close()\n self.assertFalse(reader.is_open())\n\n def test_bad_options(self):\n\n def create_writer():\n ArrayRecordWriter(self.test_file, ""blah"")\n\n def create_reader():\n ArrayRecordReader(self.test_file, ""blah"")\n\n self.assertRaises(ValueError, create_writer)\n self.assertRaises(ValueError, create_reader)\n\n def test_write_read(self):\n writer = ArrayRecordWriter(self.test_file)\n test_strs = [b""abc"", b""def"", b""ghi""]\n for s in test_strs:\n writer.write(s)\n writer.close()\n reader = ArrayRecordReader(\n self.test_file, ""readahead_buffer_size:0,max_parallelism:0""\n )\n num_strs = len(test_strs)\n self.assertEqual(reader.num_records(), num_strs)\n self.assertEqual(reader.record_index(), 0)\n for gt in test_strs:\n result = reader.read()\n self.assertEqual(result, gt)\n self.assertRaises(IndexError, reader.read)\n reader.seek(0)\n self.assertEqual(reader.record_index(), 0)\n self.assertEqual(reader.read(), test_strs[0])\n self.assertEqual(reader.record_index(), 1)\n\n def test_write_read_non_unicode(self):\n writer = ArrayRecordWriter(self.test_file)\n b = b""F\xc3\xb8\xc3\xb6\x97\xc3\xa5r""\n writer.write(b)\n writer.close()\n reader = ArrayRecordReader(self.test_file)\n self.assertEqual(reader.read(), b)\n\n def test_write_read_with_file_reader_buffer_size(self):\n writer = ArrayRecordWriter(self.test_file)\n b = b""F\xc3\xb8\xc3\xb6\x97\xc3\xa5r""\n writer.write(b)\n writer.close()\n reader = ArrayRecordReader(self.test_file, file_reader_buffer_size=2**10)\n self.assertEqual(reader.read(), b)\n\n def test_batch_read(self):\n writer = ArrayRecordWriter(self.test_file)\n test_strs = [b""abc"", b""def"", b""ghi"", b""kkk"", b""...""]\n for s in test_strs:\n writer.write(s)\n writer.close()\n reader = ArrayRecordReader(self.test_file)\n results = reader.read_all()\n self.assertEqual(test_strs, results)\n indices = [1, 3, 0]\n expected = [test_strs[i] for i in indices]\n batch_fetch = reader.read(indices)\n self.assertEqual(expected, batch_fetch)\n\n def test_read_range(self):\n writer = ArrayRecordWriter(self.test_file)\n test_strs = [b""abc"", 
b""def"", b""ghi"", b""kkk"", b""...""]\n for s in test_strs:\n writer.write(s)\n writer.close()\n reader = ArrayRecordReader(self.test_file)\n\n def invalid_range1():\n reader.read(0, 0)\n\n self.assertRaises(IndexError, invalid_range1)\n\n def invalid_range2():\n reader.read(0, 100)\n\n self.assertRaises(IndexError, invalid_range2)\n\n def invalid_range3():\n reader.read(3, 2)\n\n self.assertRaises(IndexError, invalid_range3)\n\n self.assertEqual(reader.read(0, -1), test_strs[0:-1])\n self.assertEqual(reader.read(-3, -1), test_strs[-3:-1])\n self.assertEqual(reader.read(1, 3), test_strs[1:3])\n\n def test_writer_options(self):\n writer = ArrayRecordWriter(self.test_file, ""group_size:42"")\n writer.write(b""test123"")\n writer.close()\n reader = ArrayRecordReader(self.test_file)\n # Includes default options.\n self.assertEqual(\n reader.writer_options_string(),\n ""group_size:42,transpose:false,pad_to_block_boundary:false,zstd:3,""\n ""window_log:20,max_parallelism:1"")\n\nif __name__ == ""__main__"":\n absltest.main()\n",python,tab
|
19 |
+
18,279931,".venv/lib/python3.10/site-packages/array_record/python/array_record_module_test.py",800,0,"",python,selection_mouse
|
20 |
+
19,281098,".venv/lib/python3.10/site-packages/array_record/python/array_record_module_test.py",730,0,"",python,selection_command
|
21 |
+
20,283101,"utils/dataloader.py",0,0,"",python,tab
|
22 |
+
21,284547,".venv/lib/python3.10/site-packages/array_record/python/array_record_module_test.py",0,0,"",python,tab
|
23 |
+
22,285789,".venv/lib/python3.10/site-packages/array_record/python/array_record_module_test.py",735,0,"",python,selection_command
|
24 |
+
23,287295,"utils/dataloader.py",0,0,"",python,tab
|
25 |
+
24,287920,"utils/dataloader.py",127,0,"\n",python,content
|
26 |
+
25,288180,"utils/dataloader.py",128,0,"\nfrom array_record.python.array_record_module import ArrayRecordWriter",python,content
|
27 |
+
26,288186,"utils/dataloader.py",129,0,"",python,selection_command
|
28 |
+
27,288538,"utils/dataloader.py",128,0,"",python,selection_command
|
29 |
+
28,288763,"utils/dataloader.py",128,1,"",python,content
|
30 |
+
29,289214,"utils/dataloader.py",76,0,"",python,selection_command
|
31 |
+
30,290813,"utils/dataloader.py",76,52,"",python,content
|
32 |
+
31,290936,"utils/dataloader.py",146,0,"",python,selection_command
|
33 |
+
32,291104,"utils/dataloader.py",153,0,"",python,selection_command
|
34 |
+
33,291120,"utils/dataloader.py",83,0,"",python,selection_command
|
35 |
+
34,291302,"utils/dataloader.py",50,0,"",python,selection_command
|
36 |
+
35,291348,"utils/dataloader.py",55,0,"",python,selection_command
|
37 |
+
36,291635,"utils/dataloader.py",88,0,"",python,selection_command
|
38 |
+
37,291767,"utils/dataloader.py",93,0,"",python,selection_command
|
39 |
+
38,292023,"utils/dataloader.py",94,0,"",python,selection_command
|
40 |
+
39,292050,"utils/dataloader.py",100,0,"",python,selection_command
|
41 |
+
40,292081,"utils/dataloader.py",101,0,"",python,selection_command
|
42 |
+
41,292113,"utils/dataloader.py",121,0,"",python,selection_command
|
43 |
+
42,292320,"utils/dataloader.py",128,0,"",python,selection_command
|
44 |
+
43,292478,"utils/dataloader.py",146,0,"",python,selection_command
|
45 |
+
44,292902,"utils/dataloader.py",128,0,"",python,selection_command
|
46 |
+
45,293716,"utils/dataloader.py",2183,0,"",python,selection_command
|
47 |
+
46,294750,"utils/dataloader.py",2182,0,"",python,selection_command
|
48 |
+
47,294932,"utils/dataloader.py",2180,0,"",python,selection_command
|
49 |
+
48,295180,"utils/dataloader.py",2181,0,"",python,selection_command
|
50 |
+
49,295509,"utils/dataloader.py",2182,0,"",python,selection_command
|
51 |
+
50,295673,"utils/dataloader.py",2182,1,".",python,selection_command
|
52 |
+
51,295748,"utils/dataloader.py",2180,3,"io.",python,selection_command
|
53 |
+
52,295899,"utils/dataloader.py",2179,4,".io.",python,selection_command
|
54 |
+
53,296219,"utils/dataloader.py",2174,9,"grain.io.",python,selection_command
|
55 |
+
54,296334,"utils/dataloader.py",2174,9,"",python,content
|
56 |
+
55,297476,"utils/dataloader.py",2234,0,"",python,selection_command
|
57 |
+
56,297568,"utils/dataloader.py",2174,0,"",python,selection_command
|
58 |
+
57,299689,"utils/dataloader.py",2234,0,"",python,selection_command
|
59 |
+
58,299833,"utils/dataloader.py",2174,0,"",python,selection_command
|
60 |
+
59,300800,"utils/dataloader.py",128,0,"",python,selection_command
|
61 |
+
60,301476,"utils/dataloader.py",2174,0,"",python,selection_command
|
62 |
+
61,302888,"utils/dataloader.py",128,0,"",python,selection_command
|
63 |
+
62,304177,"utils/dataloader.py",2174,0,"",python,selection_command
|
64 |
+
63,304960,"utils/dataloader.py",2234,0,"",python,selection_command
|
65 |
+
64,305598,"utils/dataloader.py",2174,0,"",python,selection_command
|
66 |
+
65,307623,".venv/lib/python3.10/site-packages/array_record/python/array_record_module_test.py",0,0,"",python,tab
|
67 |
+
66,309542,".venv/lib/python3.10/site-packages/array_record/python/array_record_module_test.py",798,0,"",python,selection_command
|
68 |
+
67,309864,".venv/lib/python3.10/site-packages/array_record/python/array_record_module_test.py",782,0,"",python,selection_command
|
69 |
+
68,310414,".venv/lib/python3.10/site-packages/array_record/python/array_record_module_test.py",1081,0,"",python,selection_command
|
70 |
+
69,313809,"utils/dataloader.py",0,0,"",python,tab
|
71 |
+
70,314386,"utils/dataloader.py",128,0,"",python,selection_command
|
72 |
+
71,316143,"utils/dataloader.py",168,0,"",python,selection_command
|
73 |
+
72,316590,"utils/dataloader.py",128,0,"",python,selection_command
|
74 |
+
73,317380,"utils/dataloader.py",121,0,"",python,selection_command
|
75 |
+
74,317521,"utils/dataloader.py",101,0,"",python,selection_command
|
76 |
+
75,317689,"utils/dataloader.py",100,0,"",python,selection_command
|
77 |
+
76,317951,"utils/dataloader.py",101,0,"",python,selection_command
|
78 |
+
77,318108,"utils/dataloader.py",121,0,"",python,selection_command
|
79 |
+
78,318402,"utils/dataloader.py",101,0,"",python,selection_command
|
80 |
+
79,319825,"utils/dataloader.py",100,0,"",python,selection_command
|
81 |
+
80,320193,"utils/dataloader.py",94,0,"",python,selection_command
|
82 |
+
81,321189,"utils/dataloader.py",93,0,"",python,selection_command
|
83 |
+
82,321336,"utils/dataloader.py",81,0,"",python,selection_command
|
84 |
+
83,322309,"utils/dataloader.py",48,0,"",python,selection_command
|
85 |
+
84,322489,"utils/dataloader.py",55,0,"",python,selection_command
|
86 |
+
85,322729,"utils/dataloader.py",62,0,"",python,selection_command
|
87 |
+
86,322839,"utils/dataloader.py",65,0,"",python,selection_command
|
88 |
+
87,323006,"utils/dataloader.py",67,0,"",python,selection_command
|
89 |
+
88,323251,"utils/dataloader.py",65,0,"",python,selection_command
|
90 |
+
89,323426,"utils/dataloader.py",62,0,"",python,selection_command
|
91 |
+
90,324190,"utils/dataloader.py",41,0,"",python,selection_command
|
92 |
+
91,324505,"utils/dataloader.py",37,0,"",python,selection_command
|
93 |
+
92,325132,".venv/lib/python3.10/site-packages/grain/__init__.py",0,0,"# Copyright 2025 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the ""License"");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an ""AS IS"" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n""""""Public API for Grain.""""""\n\n\n# pylint: disable=g-importing-member\n# pylint: disable=unused-import\n# pylint: disable=g-multiple-import\n# pylint: disable=g-import-not-at-top\n\n# We import all public modules here to enable the use of `grain.foo.Bar`\n# instead of forcing users to write `from grain import foo as grain_foo`.\nfrom grain import (\n experimental,\n checkpoint,\n constants,\n multiprocessing,\n samplers,\n sharding,\n sources,\n transforms,\n)\n\nfrom grain._src.core.config import config\nfrom grain._src.python.data_loader import (\n DataLoader,\n DataLoaderIterator,\n)\nfrom grain._src.python.dataset.dataset import (\n DatasetIterator,\n IterDataset,\n MapDataset,\n)\nfrom grain._src.python.load import load\nfrom grain._src.python.options import ReadOptions\nfrom grain._src.python.record import Record, RecordMetadata\n",python,tab
|
94 |
+
93,325462,"utils/dataloader.py",0,0,"",python,tab
|
95 |
+
94,326040,"utils/dataloader.py",50,0,"",python,selection_command
|
96 |
+
95,326171,"utils/dataloader.py",83,0,"",python,selection_command
|
97 |
+
96,326570,"utils/dataloader.py",93,0,"",python,selection_command
|
98 |
+
97,327187,"utils/dataloader.py",144,0,"",python,selection_command
|
99 |
+
98,329930,"utils/dataloader.py",2174,0,"",python,selection_command
|
100 |
+
99,331428,".venv/lib/python3.10/site-packages/grain/__init__.py",0,0,"",python,tab
|
101 |
+
100,331705,"utils/dataloader.py",0,0,"",python,tab
|
102 |
+
101,334087,".venv/lib/python3.10/site-packages/array_record/python/array_record_module_test.py",0,0,"",python,tab
|
103 |
+
102,334446,"utils/dataloader.py",0,0,"",python,tab
|
104 |
+
103,341815,"utils/dataloader.py",2234,0,"",python,selection_command
|
105 |
+
104,342051,"utils/dataloader.py",2174,0,"",python,selection_command
|
106 |
+
105,342339,"utils/dataloader.py",2082,0,"",python,selection_command
|
107 |
+
106,342342,"utils/dataloader.py",2047,0,"",python,selection_command
|
108 |
+
107,342363,"utils/dataloader.py",2016,0,"",python,selection_command
|
109 |
+
108,342390,"utils/dataloader.py",1991,0,"",python,selection_command
|
110 |
+
109,342424,"utils/dataloader.py",1960,0,"",python,selection_command
|
111 |
+
110,342536,"utils/dataloader.py",1933,0,"",python,selection_command
|
112 |
+
111,342539,"utils/dataloader.py",1928,0,"",python,selection_command
|
113 |
+
112,342542,"utils/dataloader.py",1902,0,"",python,selection_command
|
114 |
+
113,342737,"utils/dataloader.py",1897,0,"",python,selection_command
|
115 |
+
114,343068,"utils/dataloader.py",1878,0,"",python,selection_command
|
116 |
+
115,343231,"utils/dataloader.py",1857,0,"",python,selection_command
|
117 |
+
116,343406,"utils/dataloader.py",1830,0,"",python,selection_command
|
118 |
+
117,344689,"utils/dataloader.py",1857,0,"",python,selection_command
|
119 |
+
118,344820,"utils/dataloader.py",1878,0,"",python,selection_command
|
120 |
+
119,345161,"utils/dataloader.py",1897,0,"",python,selection_command
|
121 |
+
120,345268,"utils/dataloader.py",1902,0,"",python,selection_command
|
122 |
+
121,345488,"utils/dataloader.py",1928,0,"",python,selection_command
|
123 |
+
122,345684,"utils/dataloader.py",1933,0,"",python,selection_command
|
124 |
+
123,345738,"utils/dataloader.py",1960,0,"",python,selection_command
|
125 |
+
124,346367,"utils/dataloader.py",1935,0,"",python,selection_command
|
126 |
+
125,350517,"utils/dataloader.py",1962,0,"",python,selection_command
|
127 |
+
126,350550,"utils/dataloader.py",1970,0,"",python,selection_command
|
128 |
+
127,350730,"utils/dataloader.py",1984,0,"",python,selection_command
|
129 |
+
128,350899,"utils/dataloader.py",1986,0,"",python,selection_command
|
130 |
+
129,353351,"utils/dataloader.py",1984,0,"",python,selection_command
|
131 |
+
130,353465,"utils/dataloader.py",1970,0,"",python,selection_command
|
132 |
+
131,365182,".venv/lib/python3.10/site-packages/array_record/python/array_record_module_test.py",0,0,"",python,tab
|
133 |
+
132,368404,".venv/lib/python3.10/site-packages/array_record/python/array_record_module_test.py",1048,0,"",python,selection_command
|
134 |
+
133,368651,".venv/lib/python3.10/site-packages/array_record/python/array_record_module_test.py",1034,0,"",python,selection_command
|
135 |
+
134,368680,".venv/lib/python3.10/site-packages/array_record/python/array_record_module_test.py",997,0,"",python,selection_command
|
136 |
+
135,368707,".venv/lib/python3.10/site-packages/array_record/python/array_record_module_test.py",930,0,"",python,selection_command
|
137 |
+
136,368741,".venv/lib/python3.10/site-packages/array_record/python/array_record_module_test.py",883,0,"",python,selection_command
|
138 |
+
137,368770,".venv/lib/python3.10/site-packages/array_record/python/array_record_module_test.py",864,0,"",python,selection_command
|
139 |
+
138,368823,".venv/lib/python3.10/site-packages/array_record/python/array_record_module_test.py",850,0,"",python,selection_command
|
140 |
+
139,368840,".venv/lib/python3.10/site-packages/array_record/python/array_record_module_test.py",815,0,"",python,selection_command
|
141 |
+
140,368870,".venv/lib/python3.10/site-packages/array_record/python/array_record_module_test.py",801,0,"",python,selection_command
|
142 |
+
141,369134,".venv/lib/python3.10/site-packages/array_record/python/array_record_module_test.py",800,0,"",python,selection_command
|
143 |
+
142,369285,".venv/lib/python3.10/site-packages/array_record/python/array_record_module_test.py",743,0,"",python,selection_command
|
144 |
+
143,369384,".venv/lib/python3.10/site-packages/array_record/python/array_record_module_test.py",747,0,"",python,selection_command
|
145 |
+
144,369650,".venv/lib/python3.10/site-packages/array_record/python/array_record_module_test.py",748,0,"",python,selection_command
|
146 |
+
145,369819,".venv/lib/python3.10/site-packages/array_record/python/array_record_module_test.py",754,0,"",python,selection_command
|
147 |
+
146,370140,".venv/lib/python3.10/site-packages/array_record/python/array_record_module_test.py",755,0,"",python,selection_command
|
148 |
+
147,373800,"utils/dataloader.py",0,0,"",python,tab
|
149 |
+
148,491727,"utils/dataloader.py",2293,0,"",python,selection_command
|
150 |
+
149,513277,"utils/dataloader.py",2262,0,"",python,selection_command
|
151 |
+
150,513490,"utils/dataloader.py",2234,0,"",python,selection_command
|
152 |
+
151,513578,"utils/dataloader.py",2174,0,"",python,selection_command
|
153 |
+
152,513913,"utils/dataloader.py",2082,0,"",python,selection_command
|
154 |
+
153,515523,"utils/dataloader.py",2065,0,"",python,selection_command
|
155 |
+
154,515900,"utils/dataloader.py",2157,0,"",python,selection_command
|
156 |
+
155,516046,"utils/dataloader.py",2217,0,"",python,selection_command
|
157 |
+
156,517211,"utils/dataloader.py",2262,0,"",python,selection_command
|
158 |
+
157,517341,"utils/dataloader.py",2276,0,"",python,selection_command
|
159 |
+
158,517500,"utils/dataloader.py",2320,0,"",python,selection_command
|
160 |
+
159,522595,"utils/dataloader.py",2341,0,"",python,selection_command
|
161 |
+
160,522733,"utils/dataloader.py",2355,0,"",python,selection_command
|
162 |
+
161,529340,"utils/dataloader.py",2367,0,"",python,selection_command
|
163 |
+
162,529535,"utils/dataloader.py",2369,0,"",python,selection_command
|
164 |
+
163,530400,"utils/dataloader.py",615,0,"",python,selection_command
|
165 |
+
164,531632,"utils/dataloader.py",300,0,"",python,selection_command
|
166 |
+
165,576701,"utils/dataloader.py",2018,724," arrayrecord_files.append(output_file)\n print(f""Converted {tfrecord_file.name} -> {output_filename}: {record_count} records"")",python,content
|
167 |
+
166,576702,"utils/dataloader.py",1406,602," arrayrecord_files = []\n \n for tfrecord_file in tfrecord_files:\n # Create output filename based on input filename\n output_filename = tfrecord_file.stem + "".arrayrecord""\n output_file = os.path.join(output_folder, output_filename)\n \n # Create dataset from single TFRecord file\n dataset = tf.data.TFRecordDataset(str(tfrecord_file))\n \n def parse_tfrecord(example_proto):\n """"""Parse a single TFRecord example.""""""\n parsed_features = tf.io.parse_single_example(example_proto, feature_description)\n return {\n 'raw_video': parsed_features['raw_video'].numpy(),\n 'sequence_length': int(parsed_features['sequence_length'].numpy())\n }\n \n # Process records and write to single ArrayRecord file\n record_count = 0\n with ArrayRecordWriter(output_file) as writer:\n for record in dataset:\n parsed_record = parse_tfrecord(record)\n writer.write(parsed_record)\n record_count += 1",python,content
|
168 |
+
167,576702,"utils/dataloader.py",607,74,"",python,content
|
169 |
+
168,576702,"utils/dataloader.py",459,0," Creates one ArrayRecord file per TFRecord file.\n",python,content
|
170 |
+
169,576702,"utils/dataloader.py",296,34,"",python,content
|
171 |
+
170,599963,"utils/dataloader.py",271,0,"",python,selection_mouse
|
172 |
+
171,599968,"utils/dataloader.py",270,0,"",python,selection_command
|
173 |
+
172,601507,"utils/dataloader.py",294,0,"",python,selection_command
|
174 |
+
173,601732,"utils/dataloader.py",320,0,"",python,selection_command
|
175 |
+
174,601855,"utils/dataloader.py",345,0,"",python,selection_command
|
176 |
+
175,602070,"utils/dataloader.py",353,0,"",python,selection_command
|
177 |
+
176,602297,"utils/dataloader.py",379,0,"",python,selection_command
|
178 |
+
177,603470,"utils/dataloader.py",449,0,"",python,selection_command
|
179 |
+
178,603704,"utils/dataloader.py",480,0,"",python,selection_command
|
180 |
+
179,603728,"utils/dataloader.py",490,0,"",python,selection_command
|
181 |
+
180,603897,"utils/dataloader.py",516,0,"",python,selection_command
|
182 |
+
181,604106,"utils/dataloader.py",582,0,"",python,selection_command
|
183 |
+
182,604983,"utils/dataloader.py",649,0,"",python,selection_command
|
184 |
+
183,605236,"utils/dataloader.py",728,0,"",python,selection_command
|
185 |
+
184,605262,"utils/dataloader.py",775,0,"",python,selection_command
|
186 |
+
185,605296,"utils/dataloader.py",788,0,"",python,selection_command
|
187 |
+
186,605328,"utils/dataloader.py",814,0,"",python,selection_command
|
188 |
+
187,605360,"utils/dataloader.py",847,0,"",python,selection_command
|
189 |
+
188,605393,"utils/dataloader.py",873,0,"",python,selection_command
|
190 |
+
189,605430,"utils/dataloader.py",909,0,"",python,selection_command
|
191 |
+
190,605459,"utils/dataloader.py",941,0,"",python,selection_command
|
192 |
+
191,605494,"utils/dataloader.py",1004,0,"",python,selection_command
|
193 |
+
192,605526,"utils/dataloader.py",1056,0,"",python,selection_command
|
194 |
+
193,605559,"utils/dataloader.py",1061,0,"",python,selection_command
|
195 |
+
194,605593,"utils/dataloader.py",1087,0,"",python,selection_command
|
196 |
+
195,605637,"utils/dataloader.py",1112,0,"",python,selection_command
|
197 |
+
196,605659,"utils/dataloader.py",1138,0,"",python,selection_command
|
198 |
+
197,605692,"utils/dataloader.py",1206,0,"",python,selection_command
|
199 |
+
198,605726,"utils/dataloader.py",1233,0,"",python,selection_command
|
200 |
+
199,605759,"utils/dataloader.py",1286,0,"",python,selection_command
|
201 |
+
200,605793,"utils/dataloader.py",1312,0,"",python,selection_command
|
202 |
+
201,605826,"utils/dataloader.py",1348,0,"",python,selection_command
|
203 |
+
202,605859,"utils/dataloader.py",1374,0,"",python,selection_command
|
204 |
+
203,605893,"utils/dataloader.py",1380,0,"",python,selection_command
|
205 |
+
204,605927,"utils/dataloader.py",1406,0,"",python,selection_command
|
206 |
+
205,605960,"utils/dataloader.py",1447,0,"",python,selection_command
|
207 |
+
206,605994,"utils/dataloader.py",1504,0,"",python,selection_command
|
208 |
+
207,606026,"utils/dataloader.py",1566,0,"",python,selection_command
|
209 |
+
208,606060,"utils/dataloader.py",1616,0,"",python,selection_command
|
210 |
+
209,606094,"utils/dataloader.py",1642,0,"",python,selection_command
|
211 |
+
210,606127,"utils/dataloader.py",1693,0,"",python,selection_command
|
212 |
+
211,606161,"utils/dataloader.py",1738,0,"",python,selection_command
|
213 |
+
212,606278,"utils/dataloader.py",1693,0,"",python,selection_command
|
214 |
+
213,606538,"utils/dataloader.py",1642,0,"",python,selection_command
|
215 |
+
214,606562,"utils/dataloader.py",1616,0,"",python,selection_command
|
216 |
+
215,606817,"utils/dataloader.py",1566,0,"",python,selection_command
|
217 |
+
216,606989,"utils/dataloader.py",1504,0,"",python,selection_command
|
218 |
+
217,607180,"utils/dataloader.py",1447,0,"",python,selection_command
|
219 |
+
218,607353,"utils/dataloader.py",1406,0,"",python,selection_command
|
220 |
+
219,607493,"utils/dataloader.py",1380,0,"",python,selection_command
|
221 |
+
220,607736,"utils/dataloader.py",1374,0,"",python,selection_command
|
222 |
+
221,608023,"utils/dataloader.py",1348,0,"",python,selection_command
|
223 |
+
222,612690,"utils/dataloader.py",1353,0,"",python,selection_command
|
224 |
+
223,612849,"utils/dataloader.py",1380,0,"",python,selection_command
|
225 |
+
224,613060,"utils/dataloader.py",1385,0,"",python,selection_command
|
226 |
+
225,613084,"utils/dataloader.py",1426,0,"",python,selection_command
|
227 |
+
226,613551,"utils/dataloader.py",1423,57,"",python,content
|
228 |
+
227,613560,"utils/dataloader.py",1431,0,"",python,selection_command
|
229 |
+
228,614317,"utils/dataloader.py",1390,0,"",python,selection_command
|
230 |
+
229,615034,"utils/dataloader.py",1431,0,"",python,selection_command
|
231 |
+
230,615216,"utils/dataloader.py",1493,0,"",python,selection_command
|
232 |
+
231,615478,"utils/dataloader.py",1559,0,"",python,selection_command
|
233 |
+
232,615710,"utils/dataloader.py",1569,0,"",python,selection_command
|
234 |
+
233,615986,"utils/dataloader.py",1561,51,"",python,content
|
235 |
+
234,616001,"utils/dataloader.py",1569,0,"",python,selection_command
|
236 |
+
235,616159,"utils/dataloader.py",1559,0,"",python,selection_command
|
237 |
+
236,621081,"utils/dataloader.py",1492,0,"",python,selection_command
|
238 |
+
237,621317,"utils/dataloader.py",1430,0,"",python,selection_command
|
239 |
+
238,622701,"utils/dataloader.py",1492,0,"",python,selection_command
|
240 |
+
239,623216,"utils/dataloader.py",1493,0,"",python,selection_command
|
241 |
+
240,625105,"utils/dataloader.py",1559,0,"",python,selection_command
|
242 |
+
241,625431,"utils/dataloader.py",1569,0,"",python,selection_command
|
243 |
+
242,625434,"utils/dataloader.py",1630,0,"",python,selection_command
|
244 |
+
243,625571,"utils/dataloader.py",1640,0,"",python,selection_command
|
245 |
+
244,625816,"utils/dataloader.py",1683,0,"",python,selection_command
|
246 |
+
245,626061,"utils/dataloader.py",1734,0,"",python,selection_command
|
247 |
+
246,626094,"utils/dataloader.py",1827,0,"",python,selection_command
|
248 |
+
247,626127,"utils/dataloader.py",1848,0,"",python,selection_command
|
249 |
+
248,626160,"utils/dataloader.py",1915,0,"",python,selection_command
|
250 |
+
249,626186,"utils/dataloader.py",1998,0,"",python,selection_command
|
251 |
+
250,626363,"utils/dataloader.py",2011,0,"",python,selection_command
|
252 |
+
251,626531,"utils/dataloader.py",2021,0,"",python,selection_command
|
253 |
+
252,626996,"utils/dataloader.py",2013,63,"",python,content
|
254 |
+
253,627000,"utils/dataloader.py",2021,0,"",python,selection_command
|
255 |
+
254,627549,"utils/dataloader.py",2011,0,"",python,selection_command
|
256 |
+
255,627722,"utils/dataloader.py",1998,0,"",python,selection_command
|
257 |
+
256,628044,"utils/dataloader.py",2011,0,"",python,selection_command
|
258 |
+
257,629306,"utils/dataloader.py",2020,0,"",python,selection_command
|
259 |
+
258,629467,"utils/dataloader.py",2045,0,"",python,selection_command
|
260 |
+
259,629649,"utils/dataloader.py",2100,0,"",python,selection_command
|
261 |
+
260,633704,"utils/dataloader.py",2135,0,"",python,selection_command
|
262 |
+
261,633854,"utils/dataloader.py",2190,0,"",python,selection_command
|
263 |
+
262,634054,"utils/dataloader.py",2234,0,"",python,selection_command
|
264 |
+
263,634165,"utils/dataloader.py",2268,0,"",python,selection_command
|
265 |
+
264,636469,"utils/dataloader.py",2277,0,"",python,selection_command
|
266 |
+
265,644161,"utils/dataloader.py",1380,0,"",python,selection_command
|
267 |
+
266,644519,"utils/dataloader.py",566,0,"",python,selection_command
|
268 |
+
267,645025,"utils/dataloader.py",0,0,"",python,selection_command
|
269 |
+
268,645688,"utils/dataloader.py",11,0,"",python,selection_command
|
270 |
+
269,646047,"utils/dataloader.py",30,0,"",python,selection_command
|
271 |
+
270,646050,"utils/dataloader.py",43,0,"",python,selection_command
|
272 |
+
271,646053,"utils/dataloader.py",76,0,"",python,selection_command
|
273 |
+
272,646057,"utils/dataloader.py",146,0,"",python,selection_command
|
274 |
+
273,646060,"utils/dataloader.py",170,0,"",python,selection_command
|
275 |
+
274,646116,"utils/dataloader.py",180,0,"",python,selection_command
|
276 |
+
275,646297,"utils/dataloader.py",205,0,"",python,selection_command
|
277 |
+
276,646496,"utils/dataloader.py",206,0,"",python,selection_command
|
278 |
+
277,647665,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
279 |
+
278,649542,"utils/dataloader.py",0,0,"",python,tab
|
280 |
+
279,649547,"TERMINAL",0,0,"",,terminal_focus
|
281 |
+
280,656920,"TERMINAL",0,0,"ls /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/",,terminal_command
|
282 |
+
281,656970,"TERMINAL",0,0,"]633;E;2025-06-30 21:14:10 ls /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/;c4a63260-8af4-413c-a66a-959384934181]633;C",,terminal_output
|
283 |
+
282,657055,"TERMINAL",0,0,"[0m[01;34mcheckpoints[0m [01;34mdummy[0m [01;34mknoms_npy[0m [01;34mknoms_tfrecords_2_shards_overfit[0m [01;34mknoms_tfrecords_500_shards_overfit_10[0m [01;34mopen_ai_minecraft_first_try_npy[0m [01;34mopen_ai_minecraft_tfrecord[0m\r\n[01;34mcoinrun[0m [01;34mknoms_mp4[0m [01;34mknoms_tfrecords[0m [01;34mknoms_tfrecords_500_shards[0m [01;34mopen_ai_minecraft[0m [01;34mopen_ai_minecraft_first_try_tfrecord[0m [01;34moverfit_dir[0m\r\n[01;34mdata_knoms[0m [01;34mknoms_mp4_clips[0m [01;34mknoms_tfrecords_200_shards[0m [01;34mknoms_tfrecords_500_shards_overfit_1[0m [01;34mopen_ai_minecraft_first_try[0m [01;34mopen_ai_minecraft_npy[0m [01;34mprocgen_env_16_episodes_20000[0m\r\n]0;tum_dbd0378@hkn1990:~/jafar]633;D;0",,terminal_output
|
284 |
+
283,681039,"experiments/tokenizer_knoms_time_measurement.sh",0,0,"#!/usr/bin/env bash\nmodule unload mpi/openmpi/5.0\nmodule unload devel/cuda/12.4\nsource .venv/bin/activate\n\n\ndata_dir='/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/knoms_tfrecords_500_shards'\n\n# FIXME: the `time_measurement_interval` line is only for debugging and should be removed\nsrun python train_tokenizer.py \\n --time_measurement_interval 10 \\n --batch_size 768 \\n --num_steps 300000 \\n --warmup_steps 10000 \\n --seed 0 \\n --min_lr=0.0000866 \\n --max_lr=0.0000866 \\n --data_dir $data_dir",shellscript,tab
|
285 |
+
284,688383,"TERMINAL",0,0,"ls /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/knoms_tfrecords_500_shards",,terminal_command
|
286 |
+
285,688468,"TERMINAL",0,0,"]633;E;2025-06-30 21:14:41 ls /hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/knoms_tfrecords_500_shards;c4a63260-8af4-413c-a66a-959384934181]633;C",,terminal_output
|
287 |
+
286,688570,"TERMINAL",0,0,"shard-00000-of-00500.tfrecord shard-00084-of-00500.tfrecord shard-00168-of-00500.tfrecord shard-00252-of-00500.tfrecord shard-00336-of-00500.tfrecord shard-00420-of-00500.tfrecord\r\nshard-00001-of-00500.tfrecord shard-00085-of-00500.tfrecord shard-00169-of-00500.tfrecord shard-00253-of-00500.tfrecord shard-00337-of-00500.tfrecord shard-00421-of-00500.tfrecord\r\nshard-00002-of-00500.tfrecord shard-00086-of-00500.tfrecord shard-00170-of-00500.tfrecord shard-00254-of-00500.tfrecord shard-00338-of-00500.tfrecord shard-00422-of-00500.tfrecord\r\nshard-00003-of-00500.tfrecord shard-00087-of-00500.tfrecord shard-00171-of-00500.tfrecord shard-00255-of-00500.tfrecord shard-00339-of-00500.tfrecord shard-00423-of-00500.tfrecord\r\nshard-00004-of-00500.tfrecord shard-00088-of-00500.tfrecord shard-00172-of-00500.tfrecord shard-00256-of-00500.tfrecord shard-00340-of-00500.tfrecord shard-00424-of-00500.tfrecord\r\nshard-00005-of-00500.tfrecord shard-00089-of-00500.tfrecord shard-00173-of-00500.tfrecord shard-00257-of-00500.tfrecord shard-00341-of-00500.tfrecord shard-00425-of-00500.tfrecord\r\nshard-00006-of-00500.tfrecord shard-00090-of-00500.tfrecord shard-00174-of-00500.tfrecord shard-00258-of-00500.tfrecord shard-00342-of-00500.tfrecord shard-00426-of-00500.tfrecord\r\nshard-00007-of-00500.tfrecord shard-00091-of-00500.tfrecord shard-00175-of-00500.tfrecord shard-00259-of-00500.tfrecord shard-00343-of-00500.tfrecord shard-00427-of-00500.tfrecord\r\nshard-00008-of-00500.tfrecord shard-00092-of-00500.tfrecord shard-00176-of-00500.tfrecord shard-00260-of-00500.tfrecord shard-00344-of-00500.tfrecord shard-00428-of-00500.tfrecord\r\nshard-00009-of-00500.tfrecord shard-00093-of-00500.tfrecord shard-00177-of-00500.tfrecord shard-00261-of-00500.tfrecord shard-00345-of-00500.tfrecord shard-00429-of-00500.tfrecord\r\nshard-00010-of-00500.tfrecord shard-00094-of-00500.tfrecord shard-00178-of-00500.tfrecord shard-00262-of-00500.tfrecord shard-00346-of-00500.tfrecord shard-00430-of-00500.tfrecord\r\nshard-00011-of-00500.tfrecord shard-00095-of-00500.tfrecord shard-00179-of-00500.tfrecord shard-00263-of-00500.tfrecord shard-00347-of-00500.tfrecord shard-00431-of-00500.tfrecord\r\nshard-00012-of-00500.tfrecord shard-00096-of-00500.tfrecord shard-00180-of-00500.tfrecord shard-00264-of-00500.tfrecord shard-00348-of-00500.tfrecord shard-00432-of-00500.tfrecord\r\nshard-00013-of-00500.tfrecord shard-00097-of-00500.tfrecord shard-00181-of-00500.tfrecord shard-00265-of-00500.tfrecord shard-00349-of-00500.tfrecord shard-00433-of-00500.tfrecord\r\nshard-00014-of-00500.tfrecord shard-00098-of-00500.tfrecord shard-00182-of-00500.tfrecord shard-00266-of-00500.tfrecord shard-00350-of-00500.tfrecord shard-00434-of-00500.tfrecord\r\nshard-00015-of-00500.tfrecord shard-00099-of-00500.tfrecord shard-00183-of-00500.tfrecord shard-00267-of-00500.tfrecord shard-00351-of-00500.tfrecord shard-00435-of-00500.tfrecord\r\nshard-00016-of-00500.tfrecord shard-00100-of-00500.tfrecord shard-00184-of-00500.tfrecord shard-00268-of-00500.tfrecord shard-00352-of-00500.tfrecord shard-00436-of-00500.tfrecord\r\nshard-00017-of-00500.tfrecord shard-00101-of-00500.tfrecord shard-00185-of-00500.tfrecord shard-00269-of-00500.tfrecord shard-00353-of-00500.tfrecord shard-00437-of-00500.tfrecord\r\nshard-00018-of-00500.tfrecord shard-00102-of-00500.tfrecord shard-00186-of-00500.tfrecord shard-00270-of-00500.tfrecord shard-00354-of-00500.tfrecord shard-00438-of-00500.tfrecord\r\nshard-00019-of-00500.tfrecord 
shard-00103-of-00500.tfrecord shard-00187-of-00500.tfrecord shard-00271-of-00500.tfrecord shard-00355-of-00500.tfrecord shard-00439-of-00500.tfrecord\r\nshard-00020-of-00500.tfrecord shard-00104-of-00500.tfrecord shard-00188-of-00500.tfrecord shard-00272-of-00500.tfrecord shard-00356-of-00500.tfrecord shard-00440-of-00500.tfrecord\r\nshard-00021-of-00500.tfrecord shard-00105-of-00500.tfrecord shard-00189-of-00500.tfrecord shard-00273-of-00500.tfrecord shard-00357-of-00500.tfrecord shard-00441-of-00500.tfrecord\r\nshard-00022-of-00500.tfrecord shard-00106-of-00500.tfrecord shard-00190-of-00500.tfrecord shard-00274-of-00500.tfrecord shard-00358-of-00500.tfrecord shard-00442-of-00500.tfrecord\r\nshard-00023-of-00500.tfrecord shard-00107-of-00500.tfrecord shard-00191-of-00500.tfrecord shard-00275-of-00500.tfrecord shard-00359-of-00500.tfrecord shard-00443-of-00500.tfrecord\r\nshard-00024-of-00500.tfrecord shard-00108-of-00500.tfrecord shard-00192-of-00500.tfrecord shard-00276-of-00500.tfrecord shard-00360-of-00500.tfrecord shard-00444-of-00500.tfrecord\r\nshard-00025-of-00500.tfrecord shard-00109-of-00500.tfrecord shard-00193-of-00500.tfrecord shard-00277-of-00500.tfrecord shard-00361-of-00500.tfrecord shard-00445-of-00500.tfrecord\r\nshard-00026-of-00500.tfrecord shard-00110-of-00500.tfrecord shard-00194-of-00500.tfrecord shard-00278-of-00500.tfrecord shard-00362-of-00500.tfrecord shard-00446-of-00500.tfrecord\r\nshard-00027-of-00500.tfrecord shard-00111-of-00500.tfrecord shard-00195-of-00500.tfrecord shard-00279-of-00500.tfrecord shard-00363-of-00500.tfrecord shard-00447-of-00500.tfrecord\r\nshard-00028-of-00500.tfrecord shard-00112-of-00500.tfrecord shard-00196-of-00500.tfrecord shard-00280-of-00500.tfrecord shard-00364-of-00500.tfrecord shard-00448-of-00500.tfrecord\r\nshard-00029-of-00500.tfrecord shard-00113-of-00500.tfrecord shard-00197-of-00500.tfrecord shard-00281-of-00500.tfrecord shard-00365-of-00500.tfrecord shard-00449-of-00500.tfrecord\r\nshard-00030-of-00500.tfrecord shard-00114-of-00500.tfrecord shard-00198-of-00500.tfrecord shard-00282-of-00500.tfrecord shard-00366-of-00500.tfrecord shard-00450-of-00500.tfrecord\r\nshard-00031-of-00500.tfrecord shard-00115-of-00500.tfrecord shard-00199-of-00500.tfrecord shard-00283-of-00500.tfrecord shard-00367-of-00500.tfrecord shard-00451-of-00500.tfrecord\r\nshard-00032-of-00500.tfrecord shard-00116-of-00500.tfrecord shard-00200-of-00500.tfrecord shard-00284-of-00500.tfrecord shard-00368-of-00500.tfrecord shard-00452-of-00500.tfrecord\r\nshard-00033-of-00500.tfrecord shard-00117-of-00500.tfrecord shard-00201-of-00500.tfrecord shard-00285-of-00500.tfrecord shard-00369-of-00500.tfrecord shard-00453-of-00500.tfrecord\r\nshard-00034-of-00500.tfrecord shard-00118-of-00500.tfrecord shard-00202-of-00500.tfrecord shard-00286-of-00500.tfrecord shard-00370-of-00500.tfrecord shard-00454-of-00500.tfrecord\r\nshard-00035-of-00500.tfrecord shard-00119-of-00500.tfrecord shard-00203-of-00500.tfrecord shard-00287-of-00500.tfrecord shard-00371-of-00500.tfrecord shard-00455-of-00500.tfrecord\r\nshard-00036-of-00500.tfrecord shard-00120-of-00500.tfrecord shard-00204-of-00500.tfrecord shard-00288-of-00500.tfrecord shard-00372-of-00500.tfrecord shard-00456-of-00500.tfrecord\r\nshard-00037-of-00500.tfrecord shard-00121-of-00500.tfrecord shard-00205-of-00500.tfrecord shard-00289-of-00500.tfrecord shard-00373-of-00500.tfrecord shard-00457-of-00500.tfrecord\r\nshard-00038-of-00500.tfrecord shard-00122-of-00500.tfrecord shard-00206-of-00500.tfrecord 
shard-00290-of-00500.tfrecord shard-00374-of-00500.tfrecord shard-00458-of-00500.tfrecord\r\nshard-00039-of-00500.tfrecord shard-00123-of-00500.tfrecord shard-00207-of-00500.tfrecord shard-00291-of-00500.tfrecord shard-00375-of-00500.tfrecord shard-00459-of-00500.tfrecord\r\nshard-00040-of-00500.tfrecord shard-00124-of-00500.tfrecord shard-00208-of-00500.tfrecord shard-00292-of-00500.tfrecord shard-00376-of-00500.tfrecord shard-00460-of-00500.tfrecord\r\nshard-00041-of-00500.tfrecord shard-00125-of-00500.tfrecord shard-00209-of-00500.tfrecord shard-00293-of-00500.tfrecord shard-00377-of-00500.tfrecord shard-00461-of-00500.tfrecord\r\nshard-00042-of-00500.tfrecord shard-00126-of-00500.tfrecord shard-00210-of-00500.tfrecord shard-00294-of-00500.tfrecord shard-00378-of-00500.tfrecord shard-00462-of-00500.tfrecord\r\nshard-00043-of-00500.tfrecord shard-00127-of-00500.tfrecord shard-00211-of-00500.tfrecord shard-00295-of-00500.tfrecord shard-00379-of-00500.tfrecord shard-00463-of-00500.tfrecord\r\nshard-00044-of-00500.tfrecord shard-00128-of-00500.tfrecord shard-00212-of-00500.tfrecord shard-00296-of-00500.tfrecord shard-00380-of-00500.tfrecord shard-00464-of-00500.tfrecord\r\nshard-00045-of-00500.tfrecord shard-00129-of-00500.tfrecord shard-00213-of-00500.tfrecord shard-00297-of-00500.tfrecord shard-00381-of-00500.tfrecord shard-00465-of-00500.tfrecord\r\nshard-00046-of-00500.tfrecord shard-00130-of-00500.tfrecord shard-00214-of-00500.tfrecord shard-00298-of-00500.tfrecord shard-00382-of-00500.tfrecord shard-00466-of-00500.tfrecord\r\nshard-00047-of-00500.tfrecord shard-00131-of-00500.tfrecord shard-00215-of-00500.tfrecord shard-00299-of-00500.tfrecord shard-00383-of-00500.tfrecord shard-00467-of-00500.tfrecord\r\nshard-00048-of-00500.tfrecord shard-00132-of-00500.tfrecord shard-00216-of-00500.tfrecord shard-00300-of-00500.tfrecord shard-00384-of-00500.tfrecord shard-00468-of-00500.tfrecord\r\nshard-00049-of-00500.tfrecord shard-00133-of-00500.tfrecord shard-00217-of-00500.tfrecord shard-00301-of-00500.tfrecord shard-00385-of-00500.tfrecord shard-00469-of-00500.tfrecord\r\nshard-00050-of-00500.tfrecord shard-00134-of-00500.tfrecord shard-00218-of-00500.tfrecord shard-00302-of-00500.tfrecord shard-00386-of-00500.tfrecord shard-00470-of-00500.tfrecord\r\nshard-00051-of-00500.tfrecord shard-00135-of-00500.tfrecord shard-00219-of-00500.tfrecord shard-00303-of-00500.tfrecord shard-00387-of-00500.tfrecord shard-00471-of-00500.tfrecord\r\nshard-00052-of-00500.tfrecord shard-00136-of-00500.tfrecord shard-00220-of-00500.tfrecord shard-00304-of-00500.tfrecord shard-00388-of-00500.tfrecord shard-00472-of-00500.tfrecord\r\nshard-00053-of-00500.tfrecord shard-00137-of-00500.tfrecord shard-00221-of-00500.tfrecord shard-00305-of-00500.tfrecord shard-00389-of-00500.tfrecord shard-00473-of-00500.tfrecord\r\nshard-00054-of-00500.tfrecord shard-00138-of-00500.tfrecord shard-00222-of-00500.tfrecord shard-00306-of-00500.tfrecord shard-00390-of-00500.tfrecord shard-00474-of-00500.tfrecord\r\nshard-00055-of-00500.tfrecord shard-00139-of-00500.tfrecord shard-00223-of-00500.tfrecord shard-00307-of-00500.tfrecord shard-00391-of-00500.tfrecord shard-00475-of-00500.tfrecord\r\nshard-00056-of-00500.tfrecord shard-00140-of-00500.tfrecord shard-00224-of-00500.tfrecord shard-00308-of-00500.tfrecord shard-00392-of-00500.tfrecord shard-00476-of-00500.tfrecord\r\nshard-00057-of-00500.tfrecord shard-00141-of-00500.tfrecord shard-00225-of-00500.tfrecord shard-00309-of-00500.tfrecord shard-00393-of-00500.tfrecord 
shard-00477-of-00500.tfrecord\r\nshard-00058-of-00500.tfrecord shard-00142-of-00500.tfrecord shard-00226-of-00500.tfrecord shard-00310-of-00500.tfrecord shard-00394-of-00500.tfrecord shard-00478-of-00500.tfrecord\r\nshard-00059-of-00500.tfrecord shard-00143-of-00500.tfrecord shard-00227-of-00500.tfrecord shard-00311-of-00500.tfrecord shard-00395-of-00500.tfrecord shard-00479-of-00500.tfrecord\r\nshard-00060-of-00500.tfrecord shard-00144-of-00500.tfrecord shard-00228-of-00500.tfrecord shard-00312-of-00500.tfrecord shard-00396-of-00500.tfrecord shard-00480-of-00500.tfrecord\r\nshard-00061-of-00500.tfrecord shard-00145-of-00500.tfrecord shard-00229-of-00500.tfrecord shard-00313-of-00500.tfrecord shard-00397-of-00500.tfrecord shard-00481-of-00500.tfrecord\r\nshard-00062-of-00500.tfrecord shard-00146-of-00500.tfrecord shard-00230-of-00500.tfrecord shard-00314-of-00500.tfrecord shard-00398-of-00500.tfrecord shard-00482-of-00500.tfrecord\r\nshard-00063-of-00500.tfrecord shard-00147-of-00500.tfrecord shard-00231-of-00500.tfrecord shard-00315-of-00500.tfrecord shard-00399-of-00500.tfrecord shard-00483-of-00500.tfrecord\r\nshard-00064-of-00500.tfrecord shard-00148-of-00500.tfrecord shard-00232-of-00500.tfrecord shard-00316-of-00500.tfrecord shard-00400-of-00500.tfrecord shard-00484-of-00500.tfrecord\r\nshard-00065-of-00500.tfrecord shard-00149-of-00500.tfrecord shard-00233-of-00500.tfrecord shard-00317-of-00500.tfrecord shard-00401-of-00500.tfrecord shard-00485-of-00500.tfrecord\r\nshard-00066-of-00500.tfrecord shard-00150-of-00500.tfrecord shard-00234-of-00500.tfrecord shard-00318-of-00500.tfrecord shard-00402-of-00500.tfrecord shard-00486-of-00500.tfrecord\r\nshard-00067-of-00500.tfrecord shard-00151-of-00500.tfrecord shard-00235-of-00500.tfrecord shard-00319-of-00500.tfrecord shard-00403-of-00500.tfrecord shard-00487-of-00500.tfrecord\r\nshard-00068-of-00500.tfrecord shard-00152-of-00500.tfrecord shard-00236-of-00500.tfrecord shard-00320-of-00500.tfrecord shard-00404-of-00500.tfrecord shard-00488-of-00500.tfrecord\r\nshard-00069-of-00500.tfrecord shard-00153-of-00500.tfrecord shard-00237-of-00500.tfrecord shard-00321-of-00500.tfrecord shard-00405-of-00500.tfrecord shard-00489-of-00500.tfrecord\r\nshard-00070-of-00500.tfrecord shard-00154-of-00500.tfrecord shard-00238-of-00500.tfrecord shard-00322-of-00500.tfrecord shard-00406-of-00500.tfrecord shard-00490-of-00500.tfrecord\r\nshard-00071-of-00500.tfrecord shard-00155-of-00500.tfrecord shard-00239-of-00500.tfrecord shard-00323-of-00500.tfrecord shard-00407-of-00500.tfrecord shard-00491-of-00500.tfrecord\r\nshard-00072-of-00500.tfrecord shard-00156-of-00500.tfrecord shard-00240-of-00500.tfrecord shard-00324-of-00500.tfrecord shard-00408-of-00500.tfrecord shard-00492-of-00500.tfrecord\r\nshard-00073-of-00500.tfrecord shard-00157-of-00500.tfrecord shard-00241-of-00500.tfrecord shard-00325-of-00500.tfrecord shard-00409-of-00500.tfrecord shard-00493-of-00500.tfrecord\r\nshard-00074-of-00500.tfrecord shard-00158-of-00500.tfrecord shard-00242-of-00500.tfrecord shard-00326-of-00500.tfrecord shard-00410-of-00500.tfrecord shard-00494-of-00500.tfrecord\r\nshard-00075-of-00500.tfrecord shard-00159-of-00500.tfrecord shard-00243-of-00500.tfrecord shard-00327-of-00500.tfrecord shard-00411-of-00500.tfrecord shard-00495-of-00500.tfrecord\r\nshard-00076-of-00500.tfrecord shard-00160-of-00500.tfrecord shard-00244-of-00500.tfrecord shard-00328-of-00500.tfrecord shard-00412-of-00500.tfrecord shard-00496-of-00500.tfrecord\r\nshard-00077-of-00500.tfrecord 
shard-00161-of-00500.tfrecord shard-00245-of-00500.tfrecord shard-00329-of-00500.tfrecord shard-00413-of-00500.tfrecord shard-00497-of-00500.tfrecord\r\nshard-00078-of-00500.tfrecord shard-00162-of-00500.tfrecord shard-00246-of-00500.tfrecord shard-00330-of-00500.tfrecord shard-00414-of-00500.tfrecord shard-00498-of-00500.tfrecord\r\nshard-00079-of-00500.tfrecord shard-00163-of-00500.tfrecord shard-00247-of-00500.tfrecord shard-00331-of-00500.tfrecord shard-00415-of-00500.tfrecord shard-00499-of-00500.tfrecord\r\nshard-00080-of-00500.tfrecord shard-00164-of-00500.tfrecord shard-00248-of-00500.tfrecord shard-00332-of-00500.tfrecord shard-00416-of-00500.tfrecord\r\nshard-00081-of-00500.tfrecord shard-00165-of-00500.tfrecord shard-00249-of-00500.tfrecord shard-00333-of-00500.tfrecord shard-00417-of-00500.tfrecord\r\nshard-00082-of-00500.tfrecord shard-00166-of-00500.tfrecord shard-00250-of-00500.tfrecord shard-00334-of-00500.tfrecord shard-00418-of-00500.tfrecord\r\nshard-00083-of-00500.tfrecord shard-00167-of-00500.tfrecord shard-00251-of-00500.tfrecord shard-00335-of-00500.tfrecord shard-00419-of-00500.tfrecord\r\n]0;tum_dbd0378@hkn1990:~/jafar]633;D;0",,terminal_output
|
288 |
+
287,692304,"utils/dataloader.py",0,0,"",python,tab
|
289 |
+
288,696235,"utils/dataloader.py",207,0,"",python,selection_command
|
290 |
+
289,717369,"experiments/convert_knoms_to_arrayrecords.py",0,0,"",python,tab
|
291 |
+
290,718559,"utils/dataloader.py",0,0,"",python,tab
|
292 |
+
291,718955,"experiments/convert_knoms_to_arrayrecords.py",0,0,"",python,tab
|
293 |
+
292,719673,"experiments/convert_knoms_to_arrayrecords.py",0,0,"f",python,content
|
294 |
+
293,719675,"experiments/convert_knoms_to_arrayrecords.py",1,0,"",python,selection_keyboard
|
295 |
+
294,719818,"experiments/convert_knoms_to_arrayrecords.py",1,0,"r",python,content
|
296 |
+
295,719819,"experiments/convert_knoms_to_arrayrecords.py",2,0,"",python,selection_keyboard
|
297 |
+
296,719825,"experiments/convert_knoms_to_arrayrecords.py",2,0,"o",python,content
|
298 |
+
297,719826,"experiments/convert_knoms_to_arrayrecords.py",3,0,"",python,selection_keyboard
|
299 |
+
298,719845,"experiments/convert_knoms_to_arrayrecords.py",3,0,"m",python,content
|
300 |
+
299,719847,"experiments/convert_knoms_to_arrayrecords.py",4,0,"",python,selection_keyboard
|
301 |
+
300,719976,"experiments/convert_knoms_to_arrayrecords.py",4,0," ",python,content
|
302 |
+
301,719978,"experiments/convert_knoms_to_arrayrecords.py",5,0,"",python,selection_keyboard
|
303 |
+
302,720631,"utils/dataloader.py",0,0,"",python,tab
|
304 |
+
303,721423,"experiments/convert_knoms_to_arrayrecords.py",0,0,"",python,tab
|
305 |
+
304,721766,"experiments/convert_knoms_to_arrayrecords.py",5,0,"t",python,content
|
306 |
+
305,721767,"experiments/convert_knoms_to_arrayrecords.py",6,0,"",python,selection_keyboard
|
307 |
+
306,722087,"experiments/convert_knoms_to_arrayrecords.py",5,1,"",python,content
|
308 |
+
307,722215,"experiments/convert_knoms_to_arrayrecords.py",5,0,"u",python,content
|
309 |
+
308,722216,"experiments/convert_knoms_to_arrayrecords.py",6,0,"",python,selection_keyboard
|
310 |
+
309,722293,"experiments/convert_knoms_to_arrayrecords.py",6,0,"t",python,content
|
311 |
+
310,722295,"experiments/convert_knoms_to_arrayrecords.py",7,0,"",python,selection_keyboard
|
312 |
+
311,722410,"experiments/convert_knoms_to_arrayrecords.py",7,0,"i",python,content
|
313 |
+
312,722412,"experiments/convert_knoms_to_arrayrecords.py",8,0,"",python,selection_keyboard
|
314 |
+
313,722478,"experiments/convert_knoms_to_arrayrecords.py",8,0,"l",python,content
|
315 |
+
314,722480,"experiments/convert_knoms_to_arrayrecords.py",9,0,"",python,selection_keyboard
|
316 |
+
315,722571,"experiments/convert_knoms_to_arrayrecords.py",9,0,"s",python,content
|
317 |
+
316,722572,"experiments/convert_knoms_to_arrayrecords.py",10,0,"",python,selection_keyboard
|
318 |
+
317,723122,"experiments/convert_knoms_to_arrayrecords.py",10,0,".dataloader import convert_tfrecords_to_arrayrecords",python,content
|
319 |
+
318,724484,"experiments/convert_knoms_to_arrayrecords.py",62,0,"\n\nconvert_tfrecords_to_arrayrecords(\n tfrecord_folder=""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/knoms_tfrecords_500_shards"",\n output_folder=""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/knoms_arrayrecords_500_shards"",\n)",python,content
|
320 |
+
319,724900,"experiments/convert_knoms_to_arrayrecords.py",322,0,"",python,selection_command
|
321 |
+
320,726055,"experiments/convert_knoms_to_arrayrecords.py",210,0,"",python,selection_command
|
322 |
+
321,726254,"experiments/convert_knoms_to_arrayrecords.py",99,0,"",python,selection_command
|
323 |
+
322,726349,"experiments/convert_knoms_to_arrayrecords.py",64,0,"",python,selection_command
|
324 |
+
323,727319,"experiments/convert_knoms_to_arrayrecords.py",99,0,"",python,selection_command
|
325 |
+
324,727441,"experiments/convert_knoms_to_arrayrecords.py",210,0,"",python,selection_command
|
326 |
+
325,727589,"experiments/convert_knoms_to_arrayrecords.py",322,0,"",python,selection_command
|
327 |
+
326,728344,"utils/dataloader.py",0,0,"",python,tab
|
328 |
+
327,729773,"experiments/convert_knoms_to_arrayrecords.py",0,0,"",python,tab
|
329 |
+
328,730178,"experiments/convert_knoms_to_arrayrecords.py",210,0,"",python,selection_command
|
330 |
+
329,730332,"experiments/convert_knoms_to_arrayrecords.py",99,0,"",python,selection_command
|
331 |
+
330,730462,"experiments/convert_knoms_to_arrayrecords.py",103,0,"",python,selection_command
|
332 |
+
331,730710,"experiments/convert_knoms_to_arrayrecords.py",118,0,"",python,selection_command
|
333 |
+
332,730743,"experiments/convert_knoms_to_arrayrecords.py",121,0,"",python,selection_command
|
334 |
+
333,730777,"experiments/convert_knoms_to_arrayrecords.py",125,0,"",python,selection_command
|
335 |
+
334,730809,"experiments/convert_knoms_to_arrayrecords.py",126,0,"",python,selection_command
|
336 |
+
335,730842,"experiments/convert_knoms_to_arrayrecords.py",130,0,"",python,selection_command
|
337 |
+
336,730968,"experiments/convert_knoms_to_arrayrecords.py",131,0,"",python,selection_command
|
338 |
+
337,730972,"experiments/convert_knoms_to_arrayrecords.py",140,0,"",python,selection_command
|
339 |
+
338,731156,"experiments/convert_knoms_to_arrayrecords.py",141,0,"",python,selection_command
|
340 |
+
339,731484,"experiments/convert_knoms_to_arrayrecords.py",148,0,"",python,selection_command
|
341 |
+
340,731488,"experiments/convert_knoms_to_arrayrecords.py",149,0,"",python,selection_command
|
342 |
+
341,731491,"experiments/convert_knoms_to_arrayrecords.py",160,0,"",python,selection_command
|
343 |
+
342,731510,"experiments/convert_knoms_to_arrayrecords.py",161,0,"",python,selection_command
|
344 |
+
343,731574,"experiments/convert_knoms_to_arrayrecords.py",175,0,"",python,selection_command
|
345 |
+
344,731856,"experiments/convert_knoms_to_arrayrecords.py",176,0,"",python,selection_command
|
346 |
+
345,732008,"experiments/convert_knoms_to_arrayrecords.py",180,0,"",python,selection_command
|
347 |
+
346,732384,"experiments/convert_knoms_to_arrayrecords.py",99,0,"",python,selection_command
|
348 |
+
347,732558,"experiments/convert_knoms_to_arrayrecords.py",210,0,"",python,selection_command
|
349 |
+
348,732740,"experiments/convert_knoms_to_arrayrecords.py",214,0,"",python,selection_command
|
350 |
+
349,733080,"experiments/convert_knoms_to_arrayrecords.py",227,0,"",python,selection_command
|
351 |
+
350,733395,"experiments/convert_knoms_to_arrayrecords.py",230,0,"",python,selection_command
|
352 |
+
351,733397,"experiments/convert_knoms_to_arrayrecords.py",234,0,"",python,selection_command
|
353 |
+
352,733401,"experiments/convert_knoms_to_arrayrecords.py",235,0,"",python,selection_command
|
354 |
+
353,733425,"experiments/convert_knoms_to_arrayrecords.py",239,0,"",python,selection_command
|
355 |
+
354,733583,"experiments/convert_knoms_to_arrayrecords.py",240,0,"",python,selection_command
|
356 |
+
355,733586,"experiments/convert_knoms_to_arrayrecords.py",249,0,"",python,selection_command
|
357 |
+
356,733588,"experiments/convert_knoms_to_arrayrecords.py",250,0,"",python,selection_command
|
358 |
+
357,733592,"experiments/convert_knoms_to_arrayrecords.py",257,0,"",python,selection_command
|
359 |
+
358,733659,"experiments/convert_knoms_to_arrayrecords.py",258,0,"",python,selection_command
|
360 |
+
359,733798,"experiments/convert_knoms_to_arrayrecords.py",269,0,"",python,selection_command
|
361 |
+
360,734105,"experiments/convert_knoms_to_arrayrecords.py",270,0,"",python,selection_command
|
362 |
+
361,734108,"experiments/convert_knoms_to_arrayrecords.py",284,0,"",python,selection_command
|
363 |
+
362,734111,"experiments/convert_knoms_to_arrayrecords.py",285,0,"",python,selection_command
|
364 |
+
363,734143,"experiments/convert_knoms_to_arrayrecords.py",289,0,"",python,selection_command
|
365 |
+
364,734212,"experiments/convert_knoms_to_arrayrecords.py",290,0,"",python,selection_command
|
366 |
+
365,734517,"experiments/convert_knoms_to_arrayrecords.py",319,0,"",python,selection_command
|
367 |
+
366,735848,"experiments/convert_knoms_to_arrayrecords.py",210,0,"",python,selection_command
|
368 |
+
367,737136,"experiments/convert_knoms_to_arrayrecords.py",322,0,"",python,selection_command
|
369 |
+
368,755725,"TERMINAL",0,0,"salloc --nodes=1 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=8 --time=00:30:00 --partition=dev_accelerated --account=hk-project-p0023960",,terminal_command
|
370 |
+
369,755842,"TERMINAL",0,0,"]633;E;2025-06-30 21:15:48 salloc --nodes=1 --ntasks-per-node=4 --gres=gpu:4 --cpus-per-task=8 --time=00:30:00 --partition=dev_accelerated --account=hk-project-p0023960;c4a63260-8af4-413c-a66a-959384934181]633;C",,terminal_output
|
371 |
+
370,755937,"TERMINAL",0,0,"salloc: Granted job allocation 3307408\r\nsalloc: Waiting for resource configuration\r\n",,terminal_output
|
372 |
+
371,757528,"TERMINAL",0,0,"",,terminal_focus
|
373 |
+
372,760532,"TERMINAL",0,0,"idle",,terminal_command
|
374 |
+
373,760544,"TERMINAL",0,0,"]633;E;2025-06-30 21:15:53 idle;d2badc07-0e1f-4ab6-8ff4-72828e6d18d3]633;C",,terminal_output
|
375 |
+
374,783015,"TERMINAL",0,0,"salloc: Nodes hkn0401 are ready for job\r\n",,terminal_output
|
376 |
+
375,783866,"TERMINAL",0,0,"]0;tum_dbd0378@hkn0401:~/jafar[?2004h[tum_dbd0378@hkn0401 jafar]$ ",,terminal_output
|
377 |
+
376,788072,"TERMINAL",0,0,"srun",,terminal_focus
|
378 |
+
377,798199,"TERMINAL",0,0,"[?25lb[2ma[22m[2ms[22m[2mh[22m[23;34H[?25h",,terminal_output
|
379 |
+
378,798309,"TERMINAL",0,0,"[?25l[23;31Ha[23;35H[?25h",,terminal_output
|
380 |
+
379,798371,"TERMINAL",0,0,"[?25l[23;32Hs[23;33Hh[23;36H[?25h",,terminal_output
|
381 |
+
380,798469,"TERMINAL",0,0,"[?25l[23;34H [23;38H[?25h",,terminal_output
|
382 |
+
381,798583,"TERMINAL",0,0,"[?25l[23;35He[23;38H[?25h",,terminal_output
|
383 |
+
382,798696,"TERMINAL",0,0,"[?25l[23;36Hx[23;41H[?25h[?25l[23;37Hp[23;41H[?25h",,terminal_output
|
384 |
+
383,798758,"TERMINAL",0,0,"[?25l[23;38He[23;42H[?25h",,terminal_output
|
385 |
+
384,798840,"TERMINAL",0,0,"[?25l[23;39Hr[23;40Hi[23;42H[?25h[?25l[23;41Hm[23;42H[?25h",,terminal_output
|
386 |
+
385,799087,"TERMINAL",0,0,"ents/",,terminal_output
|
387 |
+
386,800917,"TERMINAL",0,0,"[?25l[23;47Hc[23;50H[?25h",,terminal_output
|
388 |
+
387,801197,"TERMINAL",0,0,"[?25l[23;48Ho[23;49Hn[23;53H[?25h[?25l[23;50Hv[23;53H[?25h",,terminal_output
|
389 |
+
388,801362,"TERMINAL",0,0,"[?25l[23;51He[23;52Hr[23;53H[?25h",,terminal_output
|
390 |
+
389,801489,"TERMINAL",0,0,"t_knoms_to_arrayrecords.py ",,terminal_output
|
391 |
+
390,806453,"experiments/tokenizer_knoms_time_measurement.sh",0,0,"",shellscript,tab
|
392 |
+
391,807174,"experiments/convert_knoms_to_arrayrecords.py",0,0,"",python,tab
|
393 |
+
392,808000,"utils/dataloader.py",0,0,"",python,tab
|
394 |
+
393,809191,"utils/dataloader.py",566,0,"",python,selection_command
|
395 |
+
394,809589,"utils/dataloader.py",1056,0,"",python,selection_command
|
396 |
+
395,815508,"experiments/convert_knoms_to_arrayrecords.py",0,0,"",python,tab
|
397 |
+
396,816630,"TERMINAL",0,0,"\r\n[?2004l\rexperiments/convert_knoms_to_arrayrecords.py: line 1: from: command not found\r\nexperiments/convert_knoms_to_arrayrecords.py: line 3: syntax error near unexpected token `newline'\r\nexperiments/convert_knoms_to_arrayrecords.py: line 3: `convert_tfrecords_to_arrayrecords('\r\n]0;tum_dbd0378@hkn0401:~/jafar[?2004h[tum_dbd0378@hkn0401 jafar]$ ",,terminal_output
|
398 |
+
397,820561,"TERMINAL",0,0,"bash experiments/convert_knoms_to_arrayrecords.py \r[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C",,terminal_output
|
399 |
+
398,820775,"TERMINAL",0,0,"[C",,terminal_output
|
400 |
+
399,821012,"TERMINAL",0,0,"[C",,terminal_output
|
401 |
+
400,821138,"TERMINAL",0,0,"[C",,terminal_output
|
402 |
+
401,821277,"TERMINAL",0,0,"[C",,terminal_output
|
403 |
+
402,821600,"TERMINAL",0,0,"[4P",,terminal_output
|
404 |
+
403,821911,"TERMINAL",0,0,"[1@p",,terminal_output
|
405 |
+
404,821968,"TERMINAL",0,0,"[1@y",,terminal_output
|
406 |
+
405,822295,"TERMINAL",0,0,"[?25l[23;34H[0mx[23;34H[23;33H[0me[23;33H[23;32H[0m [23;32H[0m[1@t[1@h[?25h",,terminal_output
|
407 |
+
406,822428,"TERMINAL",0,0,"[?25l[23;34H[0m [23;34H[0m[1@o[?25h",,terminal_output
|
408 |
+
407,822585,"TERMINAL",0,0,"[1@n",,terminal_output
|
409 |
+
408,823371,"TERMINAL",0,0,"[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C",,terminal_output
|
410 |
+
409,823501,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output
|
411 |
+
410,823810,"TERMINAL",0,0,"Traceback (most recent call last):\r\n File ""/hkfs/home/project/hk-project-p0023960/tum_dbd0378/jafar/experiments/convert_knoms_to_arrayrecords.py"", line 1, in <module>\r\n from utils.dataloader import convert_tfrecords_to_arrayrecords\r\nModuleNotFoundError: No module named 'utils'\r\n]0;tum_dbd0378@hkn0401:~/jafar[?2004h[tum_dbd0378@hkn0401 jafar]$ ",,terminal_output
|
412 |
+
411,826493,"experiments/convert_knoms_to_arrayrecords.py",323,0,"",python,selection_mouse
|
413 |
+
412,826495,"experiments/convert_knoms_to_arrayrecords.py",322,0,"",python,selection_command
|
414 |
+
413,833305,"experiments/convert_knoms_to_arrayrecords.py",0,0,"",python,selection_command
|
415 |
+
414,845395,"experiments/convert_knoms_to_arrayrecords.py",323,0,"",python,selection_mouse
|
416 |
+
415,845398,"experiments/convert_knoms_to_arrayrecords.py",322,0,"",python,selection_command
|
417 |
+
416,845627,"utils/dataloader.py",0,0,"",python,tab
|
418 |
+
417,853654,"convert_knoms_to_arrayrecords.py",0,0,"from utils.dataloader import convert_tfrecords_to_arrayrecords\n\nconvert_tfrecords_to_arrayrecords(\n tfrecord_folder=""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/knoms_tfrecords_500_shards"",\n output_folder=""/hkfs/work/workspace/scratch/tum_ind3695-jafa_ws_shared/data/knoms_arrayrecords_500_shards"",\n)",python,tab
|
419 |
+
418,854792,"TERMINAL",0,0,"python experiments/convert_knoms_to_arrayrecords.py ",,terminal_output
|
420 |
+
419,854889,"TERMINAL",0,0,"\r[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C",,terminal_output
|
421 |
+
420,855710,"TERMINAL",0,0,"[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C",,terminal_output
|
422 |
+
421,856887,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_dbd0378@hkn0401:~/jafar[?2004h[tum_dbd0378@hkn0401 jafar]$ ",,terminal_output
|
423 |
+
422,857032,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_dbd0378@hkn0401:~/jafar[?2004h[tum_dbd0378@hkn0401 jafar]$ ",,terminal_output
|
424 |
+
423,857150,"TERMINAL",0,0,"^C[?2004l\r[?2004h[?2004l\r\r\n]0;tum_dbd0378@hkn0401:~/jafar[?2004h[tum_dbd0378@hkn0401 jafar]$ ",,terminal_output
|
425 |
+
424,857624,"TERMINAL",0,0,"python experiments/convert_knoms_to_arrayrecords.py ",,terminal_output
|
426 |
+
425,860331,"TERMINAL",0,0,"\r[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C",,terminal_output
|
427 |
+
426,860446,"TERMINAL",0,0,"[C",,terminal_output
|
428 |
+
427,860588,"TERMINAL",0,0,"[C",,terminal_output
|
429 |
+
428,861276,"TERMINAL",0,0,"[C[C[C[C[C[C[C[C[C[C[C[C[C[C",,terminal_output
|
430 |
+
429,861438,"TERMINAL",0,0,"[C",,terminal_output
|
431 |
+
430,861599,"TERMINAL",0,0,"[C",,terminal_output
|
432 |
+
431,862005,"TERMINAL",0,0,"[C",,terminal_output
|
433 |
+
432,862401,"TERMINAL",0,0,"[12P",,terminal_output
|
434 |
+
433,862968,"TERMINAL",0,0,"[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C[C",,terminal_output
|
435 |
+
434,863093,"TERMINAL",0,0,"\r\n[?2004l\r",,terminal_output
|
436 |
+
435,869090,"convert_knoms_to_arrayrecords.py",323,0,"",python,selection_mouse
|
437 |
+
436,869092,"convert_knoms_to_arrayrecords.py",322,0,"",python,selection_command
|
438 |
+
437,869899,"convert_knoms_to_arrayrecords.py",63,0,"",python,selection_mouse
|
439 |
+
438,870638,"convert_knoms_to_arrayrecords.py",90,0,"",python,selection_command
|
440 |
+
439,870891,"utils/dataloader.py",0,0,"",python,tab
|
441 |
+
440,878497,"TERMINAL",0,0,"2025-06-30 21:17:51.580320: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\r\n",,terminal_output
|
442 |
+
441,878783,"TERMINAL",0,0,"WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\r\nE0000 00:00:1751311071.802320 3767971 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\r\nE0000 00:00:1751311071.838973 3767971 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\r\n",,terminal_output
|
443 |
+
442,879233,"TERMINAL",0,0,"W0000 00:00:1751311072.193670 3767971 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751311072.193694 3767971 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751311072.193696 3767971 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\nW0000 00:00:1751311072.193699 3767971 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\r\n",,terminal_output
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-8e7b7877-c553-4d5c-a7c5-433adcd8112b1754287948136-2025_08_04-08.12.35.154/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-97725341-21b1-49e4-a29b-2639a6d35d101753197054575-2025_07_22-17.11.28.789/source.csv
ADDED
@@ -0,0 +1,50 @@
1 |
+
Sequence,Time,File,RangeOffset,RangeLength,Text,Language,Type
|
2 |
+
1,5,"utils/nn.py",0,0,"import math\nfrom typing import Tuple\nfrom functools import partial\n\nfrom flax import linen as nn\nimport jax\nimport jax.numpy as jnp\nimport einops\n\n\nclass PositionalEncoding(nn.Module):\n """"""https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/JAX/tutorial6/Transformers_and_MHAttention.html""""""\n\n d_model: int # Hidden dimensionality of the input.\n max_len: int = 5000 # Maximum length of a sequence to expect.\n\n def setup(self):\n # Create matrix of [SeqLen, HiddenDim] representing the positional encoding for max_len inputs\n self.pe = jnp.zeros((self.max_len, self.d_model))\n position = jnp.arange(0, self.max_len, dtype=jnp.float32)[:, None]\n div_term = jnp.exp(\n jnp.arange(0, self.d_model, 2) * (-math.log(10000.0) / self.d_model)\n )\n self.pe = self.pe.at[:, 0::2].set(jnp.sin(position * div_term))\n self.pe = self.pe.at[:, 1::2].set(jnp.cos(position * div_term))\n\n def __call__(self, x):\n x = x + self.pe[: x.shape[2]]\n return x\n\n# class STBlock2(nn.Module):\n # dim: int\n # num_heads: int\n # dropout: float\n # param_dtype: jnp.dtype\n # dtype: jnp.dtype\n\n # @nn.remat\n # @nn.compact\n # def __call__(self, x: jax.Array) -> jax.Array:\n # # --- Spatial attention ---\n # z = PositionalEncoding(self.dim)(x)\n # z = nn.LayerNorm(\n # param_dtype=self.param_dtype,\n # dtype=self.dtype,\n # )(z)\n # causal_mask = jnp.tri(z.shape[-2])\n # z = nn.MultiHeadAttention(\n # num_heads=self.num_heads,\n # qkv_features=self.dim,\n # dropout_rate=self.dropout,\n # param_dtype=self.param_dtype,\n # dtype=self.dtype,\n # )(z, mask=causal_mask)\n # x = x + z\n\n # # --- Temporal attention ---\n # x = x.swapaxes(1, 2)\n # z = PositionalEncoding(self.dim)(x)\n # z = nn.LayerNorm(\n # param_dtype=self.param_dtype,\n # dtype=self.dtype,\n # )(z)\n # causal_mask = jnp.tri(z.shape[-2])\n # z = nn.MultiHeadAttention(\n # num_heads=self.num_heads,\n # qkv_features=self.dim,\n # dropout_rate=self.dropout,\n # param_dtype=self.param_dtype,\n # dtype=self.dtype,\n # )(z, mask=causal_mask)\n # x = x + z\n # x = x.swapaxes(1, 2)\n\n # # --- Feedforward ---\n # z = nn.LayerNorm(\n # param_dtype=self.param_dtype,\n # dtype=self.dtype,\n # )(x)\n # # FIXME (f.srambical): Here, the attention hidden dimension is the same as the FFN's. 
Usually, FFN hidden dimension is 4x model_dim\n # z = nn.Dense(\n # self.dim,\n # param_dtype=self.param_dtype,\n # dtype=self.dtype,\n # )(z)\n # z = nn.gelu(z)\n # x = x + z\n\n # return x\n\n# class CausalTransformer(nn.Module):\n # model_dim: int\n # out_dim: int\n # num_blocks: int\n # num_heads: int\n # dropout: float\n # param_dtype: jnp.dtype\n # dtype: jnp.dtype\n\n # @nn.compact\n # def __call__(self, x: jax.Array) -> jax.Array:\n # # Input projection and normalization\n # x = nn.Sequential(\n # [\n # nn.LayerNorm(\n # param_dtype=self.param_dtype,\n # dtype=self.dtype,\n # ),\n # nn.Dense(self.model_dim,\n # param_dtype=self.param_dtype,\n # dtype=self.dtype,\n # ),\n # nn.LayerNorm(\n # param_dtype=self.param_dtype,\n # dtype=self.dtype,\n # ),\n # ]\n # )(x)\n # # Causal transformer blocks\n # for _ in range(self.num_blocks):\n # x = STBlock2(\n # dim=self.model_dim,\n # num_heads=self.num_heads,\n # dropout=self.dropout,\n # param_dtype=self.param_dtype,\n # dtype=self.dtype,\n # )(x)\n\n # # Output projection\n # x = nn.Dense(\n # self.out_dim,\n # param_dtype=self.param_dtype,\n # dtype=self.dtype,\n # )(x)\n # return x # (B, T, E)\n\n\nclass STBlock(nn.Module):\n dim: int\n num_heads: int\n dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n spatial_bert: bool = True\n\n @nn.remat\n @nn.compact\n def __call__(self, x: jax.Array) -> jax.Array:\n # --- Spatial attention ---\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n z = nn.MultiHeadAttention(\n num_heads=self.num_heads,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(self.use_flash_attention, is_causal=not self.spatial_bert),\n decode=True,\n )(z)\n x = x + z\n\n # --- Temporal attention ---\n x = x.swapaxes(1, 2)\n z = PositionalEncoding(self.dim)(x)\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n causal_mask = jnp.tri(z.shape[-2])\n z = nn.MultiHeadAttention(\n num_heads=self.num_heads,\n qkv_features=self.dim,\n dropout_rate=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n attention_fn=_create_flash_attention_fn(self.use_flash_attention, is_causal=True),\n decode=True,\n # FIXME (f.srambical): check whether we should still pass the mask if we set is_causal=True\n )(z)\n x = x + z\n x = x.swapaxes(1, 2)\n\n # --- Feedforward ---\n z = nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(x)\n # FIXME (f.srambical): Here, the attention hidden dimension is the same as the FFN's. 
Usually, FFN hidden dimension is 4x model_dim\n z = nn.Dense(\n self.dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(z)\n z = nn.gelu(z)\n x = x + z\n\n return x\n\n\nclass STTransformer(nn.Module):\n model_dim: int\n out_dim: int\n num_blocks: int\n num_heads: int\n dropout: float\n param_dtype: jnp.dtype\n dtype: jnp.dtype\n use_flash_attention: bool\n spatial_bert: bool = True\n\n @nn.compact\n def __call__(self, x: jax.Array) -> jax.Array:\n x = nn.Sequential(\n [\n nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n nn.Dense(self.model_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n nn.LayerNorm(\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n ),\n ]\n )(x)\n for _ in range(self.num_blocks):\n x = STBlock(\n dim=self.model_dim,\n num_heads=self.num_heads,\n dropout=self.dropout,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n spatial_bert=self.spatial_bert,\n use_flash_attention=self.use_flash_attention,\n )(x)\n x = nn.Dense(\n self.out_dim,\n param_dtype=self.param_dtype,\n dtype=self.dtype,\n )(x)\n return x # (B, T, E)\n\n\ndef normalize(x):\n return x / (jnp.linalg.norm(x, ord=2, axis=-1, keepdims=True) + 1e-8)\n\n\nclass VectorQuantizer(nn.Module):\n latent_dim: int\n num_latents: int\n dropout: float\n\n def setup(self):\n self.codebook = normalize(\n self.param(\n ""codebook"",\n nn.initializers.lecun_uniform(),\n (self.num_latents, self.latent_dim),\n )\n )\n self.drop = nn.Dropout(self.dropout, deterministic=False)\n\n def __call__(\n self, x: jax.Array, training: bool\n ) -> Tuple[jax.Array, jax.Array, jax.Array, jax.Array]:\n # --- Compute distances ---\n x = normalize(x)\n codebook = normalize(self.codebook)\n distance = -jnp.matmul(x, codebook.T)\n if training:\n dropout_key = self.make_rng(""dropout"")\n distance = self.drop(distance, rng=dropout_key)\n\n # --- Get indices and embeddings ---\n indices = jnp.argmin(distance, axis=-1)\n z = self.codebook[indices]\n\n # --- Straight through estimator ---\n z_q = x + jax.lax.stop_gradient(z - x)\n return z_q, z, x, indices\n\n def get_codes(self, indices: jax.Array):\n return self.codebook[indices]\n\n\ndef _create_flash_attention_fn(use_flash_attention: bool, is_causal: bool):\n """"""\n Create an attention function that uses flash attention if enabled.\n\n Flax MultiHeadAttention provides tensors with shape (batch..., length, num_heads, head_dim)\n jax.nn.dot_product_attention expects (batch, length, num_heads, head_dim).\n\n We need to reshape to ensure compatibility. cuDNN's flash attention additionally\n requires a sequence length that is a multiple of 4. We pad the sequence length to the nearest\n multiple of 4 and mask accordingly.\n """"""\n \n def attention_fn(query, key, value, bias=None, mask=None, **kwargs):\n implementation = 'cudnn' if use_flash_attention else None\n\n def _rearrange(x):\n return einops.rearrange(x, '... l h d -> (...) 
l h d')\n def _pad(x):\n return jnp.pad(x, ((0, 0), (0, pad_size), (0, 0), (0, 0)))\n def _fuse_masks(mask: jax.Array, attention_mask: jax.Array) -> jax.Array:\n mask_bool = mask.astype(jnp.bool_)\n expanded_mask = jnp.pad(mask_bool, ((0, pad_size), (0, pad_size)), constant_values=False)\n return jnp.logical_and(attention_mask, expanded_mask)\n \n original_shape = query.shape\n original_seq_len = query.shape[-3]\n \n # Pad to nearest multiple of 4\n target_seq_len = ((original_seq_len + 3) // 4) * 4\n pad_size = target_seq_len - original_seq_len\n\n query_4d = _pad(_rearrange(query))\n key_4d = _pad(_rearrange(key))\n value_4d = _pad(_rearrange(value))\n \n attention_mask = jnp.ones((target_seq_len, target_seq_len), dtype=jnp.bool_)\n attention_mask = attention_mask.at[original_seq_len:, :].set(False)\n attention_mask = attention_mask.at[:, original_seq_len:].set(False)\n\n mask_4d = _fuse_masks(mask, attention_mask) if mask is not None else attention_mask\n mask_4d = mask_4d[jnp.newaxis, jnp.newaxis, :, :] # (1, 1, seq_len, seq_len)\n \n bias_4d = _pad(_rearrange(bias)) if bias is not None else None\n \n output_4d = jax.nn.dot_product_attention(\n query=query_4d,\n key=key_4d,\n value=value_4d,\n bias=bias_4d,\n mask=mask_4d,\n implementation=implementation,\n is_causal=is_causal,\n **kwargs\n )\n return output_4d[..., :original_seq_len, :, :].reshape(original_shape)\n \n return attention_fn\n\n",python,tab
|
3 |
+
2,396,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"5:11:28 PM [info] Activating crowd-code\n5:11:28 PM [info] Recording started\n5:11:28 PM [info] Initializing git provider using file system watchers...\n",Log,tab
|
4 |
+
3,587,"extension-output-pdoom-org.crowd-code-#1-crowd-code",150,0,"5:11:29 PM [info] Git repository found\n5:11:29 PM [info] Git provider initialized successfully\n5:11:29 PM [info] Initial git state: [object Object]\n",Log,content
|
5 |
+
4,912477,"utils/nn.py",0,0,"",python,tab
|
6 |
+
5,912492,"extension-output-pdoom-org.crowd-code-#1-crowd-code",0,0,"",Log,tab
|
7 |
+
6,912499,"utils/nn.py",0,0,"",python,tab
|
8 |
+
7,912533,"TERMINAL",0,0,"bash",,terminal_focus
|
9 |
+
8,915012,"TERMINAL",0,0,"pre-commit install",,terminal_command
|
10 |
+
9,915044,"TERMINAL",0,0,"]633;Cbash: pre-commit: command not found...\r\n",,terminal_output
|
11 |
+
10,916100,"TERMINAL",0,0,"^C\r\n]0;tum_dbd0378@hkn1990:~/jafar",,terminal_output
|
12 |
+
11,917728,"TERMINAL",0,0,"source /home/hk-project-p0023960/tum_dbd0378/jafar/.venv/bin/activate",,terminal_command
|
13 |
+
12,917739,"TERMINAL",0,0,"]633;C]0;tum_dbd0378@hkn1990:~/jafar",,terminal_output
|
14 |
+
13,922878,"TERMINAL",0,0,"pre-commit install",,terminal_command
|
15 |
+
14,922946,"TERMINAL",0,0,"]633;Cbash: pre-commit: command not found...\r\n",,terminal_output
|
16 |
+
15,923049,"TERMINAL",0,0,"]0;tum_dbd0378@hkn1990:~/jafar",,terminal_output
|
17 |
+
16,939250,"TERMINAL",0,0,"uv pip install pre-commit",,terminal_command
|
18 |
+
17,939302,"TERMINAL",0,0,"]633;C",,terminal_output
|
19 |
+
18,939510,"TERMINAL",0,0,"[37m⠋[0m [2mResolving dependencies... [0m\r[2K[37m⠙[0m [2mResolving dependencies... [0m\r[2K[37m⠋[0m [2mResolving dependencies... [0m\r[2K[37m⠙[0m [2mResolving dependencies... [0m",,terminal_output
|
20 |
+
19,939709,"TERMINAL",0,0,"\r[2K[37m⠙[0m [2mpre-commit==4.2.0 [0m\r[2K[37m⠹[0m [2mpre-commit==4.2.0 [0m",,terminal_output
|
21 |
+
20,940142,"TERMINAL",0,0,"\r[2K[37m⠹[0m [2mcfgv==3.4.0 [0m\r[2K[37m⠹[0m [2midentify==2.6.12 [0m\r[2K[37m⠹[0m [2mnodeenv==1.9.1 [0m\r[2K[37m⠹[0m [2mpyyaml==6.0.2 [0m\r[2K[37m⠹[0m [2mvirtualenv==20.32.0 [0m\r[2K[37m⠹[0m [2mdistlib==0.4.0 [0m\r[2K[37m⠸[0m [2mdistlib==0.4.0 [0m\r[2K[37m⠸[0m [2mfilelock==3.18.0 [0m\r[2K[37m⠸[0m [2mplatformdirs==4.3.8 [0m\r[2K[37m⠸[0m [2m [0m\r[2K[2mResolved [1m9 packages[0m [2min 427ms[0m[0m\r\n[37m⠋[0m [2mPreparing packages...[0m (0/0) \r[2K[37m⠋[0m [2mPreparing packages...[0m (0/6) \r[2K[37m⠙[0m [2mPreparing packages...[0m (0/6) \r[2K[37m⠙[0m [2mPreparing packages...[0m (0/6)\r\n[2mnodeenv [0m [32m[2m------------------------------[0m[0m 0 B/21.79 KiB [1A\r[2K[1B\r[2K[1A[37m⠙[0m [2mPreparing packages...[0m (0/6)\r\n[2mnodeenv [0m [32m[2m------------------------------[0m[0m 0 B/21.79 KiB [1A\r[2K[1B\r[2K[1A[37m⠙[0m [2mPreparing packages...[0m (0/6)\r\n[2mnodeenv [0m [32m[2m------------------------------[0m[0m 0 B/21.79 KiB\r\n[2midentify [0m [32m[2m------------------------------[0m[0m 0 B/96.82 KiB [2A\r[2K[1B\r[2K[1B\r[2K[2A[37m⠙[0m [2mPreparing packages...[0m (0/6)\r\n[2mnodeenv [0m [32m[2m------------------------------[0m[0m 0 B/21.79 KiB\r\n[2midentify [0m [32m-----[2m-------------------------[0m[0m 14.88 KiB/96.82 KiB [2A\r[2K[1B\r[2K[1B\r[2K[2A[37m⠙[0m [2mPreparing packages...[0m (0/6)\r\n[2mnodeenv [0m [32m[2m------------------------------[0m[0m 0 B/21.79 KiB\r\n[2midentify [0m [32m-----[2m-------------------------[0m[0m 14.88 KiB/96.82 KiB [2A\r[2K[1B\r[2K[1B\r[2K[2A[37m⠙[0m [2mPreparing packages...[0m (0/6)\r\n[2mnodeenv [0m [32m[2m------------------------------[0m[0m 0 B/21.79 KiB\r\n[2midentify [0m [32m-----[2m-------------------------[0m[0m 14.88 KiB/96.82 KiB [2A\r[2K[1B\r[2K[1B\r[2K[2A[37m⠙[0m [2mPreparing packages...[0m (0/6)\r\n[2mnodeenv [0m [32m[2m------------------------------[0m[0m 0 B/21.79 KiB\r\n[2midentify [0m [32m-----[2m-------------------------[0m[0m 14.88 KiB/96.82 KiB\r\n[2mpre-commit [0m [32m[2m------------------------------[0m[0m 0 B/215.53 KiB [3A\r[2K[1B\r[2K[1B\r[2K[1B\r[2K[3A[37m⠙[0m [2mPreparing packages...[0m (0/6)\r\n[2mnodeenv [0m [32m[2m------------------------------[0m[0m 0 B/21.79 KiB\r\n[2midentify [0m [32m-----[2m-------------------------[0m[0m 14.88 KiB/96.82 KiB\r\n[2mpre-commit [0m [32m---[2m---------------------------[0m[0m 14.91 KiB/215.53 KiB [3A\r[2K[1B\r[2K[1B\r[2K[1B\r[2K[3A[37m⠙[0m [2mPreparing packages...[0m (0/6)\r\n[2mnodeenv [0m [32m-----------------------[2m-------[0m[0m 16.00 KiB/21.79 KiB\r\n[2midentify [0m [32m-----[2m-------------------------[0m[0m 14.88 KiB/96.82 KiB\r\n[2mpre-commit [0m [32m---[2m---------------------------[0m[0m 14.91 KiB/215.53 KiB [3A\r[2K[1B\r[2K[1B\r[2K[1B\r[2K[3A[37m⠙[0m [2mPreparing packages...[0m (0/6)\r\n[2mnodeenv [0m [32m-----------------------[2m-------[0m[0m 16.00 KiB/21.79 KiB\r\n[2midentify [0m [32m-----[2m-------------------------[0m[0m 14.88 KiB/96.82 KiB\r\n[2mpre-commit [0m [32m---[2m---------------------------[0m[0m 14.91 KiB/215.53 KiB [3A\r[2K[1B\r[2K[1B\r[2K[1B\r[2K[3A[37m⠙[0m [2mPreparing packages...[0m (0/6)\r\n[2mnodeenv [0m [32m-----------------------[2m-------[0m[0m 16.00 KiB/21.79 KiB\r\n[2midentify [0m [32m-----[2m-------------------------[0m[0m 14.88 KiB/96.82 KiB\r\n[2mpre-commit [0m [32m---[2m---------------------------[0m[0m 14.91 KiB/215.53 KiB [3A\r[2K[1B\r[2K[1B\r[2K[1B\r[2K[3A[37m⠙[0m [2mPreparing packages...[0m (0/6)\r\n[2mnodeenv [0m [32m-----------------------[2m-------[0m[0m 16.00 KiB/21.79 KiB\r\n[2midentify [0m 
[32m-----[2m-------------------------[0m[0m 14.88 KiB/96.82 KiB\r\n[2mpre-commit [0m [32m---[2m---------------------------[0m[0m 14.91 KiB/215.53 KiB [3A\r[2K[1B\r[2K[1B\r[2K[1B\r[2K[3A[37m⠙[0m [2mPreparing packages...[0m (0/6)\r\n[2mnodeenv [0m [32m-----------------------[2m-------[0m[0m 16.00 KiB/21.79 KiB\r\n[2midentify [0m [32m-----[2m-------------------------[0m[0m 14.88 KiB/96.82 KiB\r\n[2mpre-commit [0m [32m---[2m---------------------------[0m[0m 14.91 KiB/215.53 KiB\r\n[2mdistlib [0m [32m[2m------------------------------[0m[0m 0 B/458.05 KiB [4A\r[2K[1B\r[2K[1B\r[2K[1B\r[2K[1B\r[2K[4A[37m⠙[0m [2mPreparing packages...[0m (0/6)\r\n[2mnodeenv [0m [32m-----------------------[2m-------[0m[0m 16.00 KiB/21.79 KiB\r\n[2midentify [0m [32m-----[2m-------------------------[0m[0m 14.88 KiB/96.82 KiB\r\n[2mpre-commit [0m [32m---[2m---------------------------[0m[0m 14.91 KiB/215.53 KiB\r\n[2mdistlib [0m [32m[2m------------------------------[0m[0m 0 B/458.05 KiB [4A\r[2K[1B\r[2K[1B\r[2K[1B\r[2K[1B\r[2K[4A[37m⠙[0m [2mPreparing packages...[0m (0/6)\r\n[2mnodeenv [0m [32m-----------------------[2m-------[0m[0m 16.00 KiB/21.79 KiB\r\n[2midentify [0m [32m-----[2m-------------------------[0m[0m 14.88 KiB/96.82 KiB\r\n[2mpre-commit [0m [32m---[2m---------------------------[0m[0m 14.91 KiB/215.53 KiB\r\n[2mdistlib [0m [32m[2m------------------------------[0m[0m 0 B/458.05 KiB [4A\r[2K[1B\r[2K[1B\r[2K[1B\r[2K[1B\r[2K[4A[37m⠙[0m [2mPreparing packages...[0m (0/6)\r\n[2mnodeenv [0m [32m-----------------------[2m-------[0m[0m 16.00 KiB/21.79 KiB\r\n[2midentify [0m [32m-----[2m-------------------------[0m[0m 14.88 KiB/96.82 KiB\r\n[2mpre-commit [0m [32m---[2m---------------------------[0m[0m 14.91 KiB/215.53 KiB\r\n[2mdistlib [0m [32m[2m------------------------------[0m[0m 0 B/458.05 KiB [4A\r[2K[1B\r[2K[1B\r[2K[1B\r[2K[1B\r[2K[4A[37m⠙[0m [2mPreparing packages...[0m (0/6)\r\n[2mnodeenv [0m [32m-----------------------[2m-------[0m[0m 16.00 KiB/21.79 KiB\r\n[2midentify [0m [32m-----[2m-------------------------[0m[0m 14.88 KiB/96.82 KiB\r\n[2mpre-commit [0m [32m---[2m---------------------------[0m[0m 14.91 KiB/215.53 KiB\r\n[2mdistlib [0m [32m[2m------------------------------[0m[0m 0 B/458.05 KiB [4A\r[2K[1B\r[2K[1B\r[2K[1B\r[2K[1B\r[2K[4A[37m⠙[0m [2mPreparing packages...[0m (0/6)\r\n[2mcfgv [0m [32m------------------------------[2m[0m[0m 7.08 KiB/7.08 KiB\r\n[2mnodeenv [0m [32m------------------------------[2m[0m[0m 21.79 KiB/21.79 KiB\r\n[2midentify [0m [32m------------------------------[2m[0m[0m 94.88 KiB/96.82 KiB\r\n[2mpre-commit [0m [32m---[2m---------------------------[0m[0m 14.91 KiB/215.53 KiB\r\n[2mdistlib [0m [32m-----[2m-------------------------[0m[0m 62.88 KiB/458.05 KiB\r\n[2mvirtualenv [0m [32m-[2m-----------------------------[0m[0m 14.88 KiB/5.78 MiB [6A\r[2K[1B\r[2K[1B\r[2K[1B\r[2K[1B\r[2K[1B\r[2K[1B\r[2K[6A[37m⠙[0m [2mPreparing packages...[0m (0/6)\r\n[2mnodeenv [0m [32m------------------------------[2m[0m[0m 21.79 KiB/21.79 KiB\r\n[2midentify [0m [32m------------------------------[2m[0m[0m 94.88 KiB/96.82 KiB\r\n[2mpre-commit [0m [32m---[2m---------------------------[0m[0m 14.91 KiB/215.53 KiB\r\n[2mdistlib [0m [32m-----[2m-------------------------[0m[0m 62.88 KiB/458.05 KiB\r\n[2mvirtualenv [0m [32m-[2m-----------------------------[0m[0m 14.88 KiB/5.78 MiB [5A\r[2K[1B\r[2K[1B\r[2K[1B\r[2K[1B\r[2K[1B\r[2K[5A[37m⠙[0m [2mPreparing packages...[0m (0/6)\r\n[2midentify [0m [32m------------------------------[2m[0m[0m 94.88 KiB/96.82 KiB\r\n[2mpre-commit [0m [32m---[2m---------------------------[0m[0m 
14.91 KiB/215.53 KiB\r\n[2mdistlib [0m [32m-----[2m-------------------------[0m[0m 62.88 KiB/458.05 KiB\r\n[2mvirtualenv [0m [32m-[2m-----------------------------[0m[0m 14.88 KiB/5.78 MiB [4A\r[2K[1B\r[2K[1B\r[2K[1B\r[2K[1B\r[2K[4A[37m⠙[0m [2mPreparing packages...[0m (0/6)\r\n[2mpre-commit [0m [32m---[2m---------------------------[0m[0m 14.91 KiB/215.53 KiB\r\n[2mdistlib [0m [32m---------[2m---------------------[0m[0m 125.74 KiB/458.05 KiB\r\n[2mvirtualenv [0m [32m-[2m-----------------------------[0m[0m 14.88 KiB/5.78 MiB [3A\r[2K[1B\r[2K[1B\r[2K[1B\r[2K[3A[37m⠙[0m [2mPreparing packages...[0m (0/6)\r\n[2mpre-commit [0m [32m-----[2m-------------------------[0m[0m 30.91 KiB/215.53 KiB\r\n[2mdistlib [0m [32m-------------------[2m-----------[0m[0m 285.74 KiB/458.05 KiB\r\n[2mvirtualenv [0m [32m-[2m-----------------------------[0m[0m 14.88 KiB/5.78 MiB [3A\r[2K[1B\r[2K[1B\r[2K[1B\r[2K[3A[37m⠙[0m [2mPreparing packages...[0m (0/6)\r\n[2mpre-commit [0m [32m-------[2m-----------------------[0m[0m 46.91 KiB/215.53 KiB\r\n[2mvirtualenv [0m [32m-[2m-----------------------------[0m[0m 14.88 KiB/5.78 MiB [2A\r[2K[1B\r[2K[1B\r[2K[2A[37m⠙[0m [2mPreparing packages...[0m (0/6)\r\n[2mpre-commit [0m [32m--------[2m----------------------[0m[0m 56.72 KiB/215.53 KiB\r\n[2mvirtualenv [0m [32m-[2m-----------------------------[0m[0m 30.88 KiB/5.78 MiB [2A\r[2K[1B\r[2K[1B\r[2K[2A[37m⠹[0m [2mPreparing packages...[0m (4/6)\r\n[2mpre-commit [0m [32m--------[2m----------------------[0m[0m 56.72 KiB/215.53 KiB\r\n[2mvirtualenv [0m [32m-[2m-----------------------------[0m[0m 46.88 KiB/5.78 MiB ",,terminal_output
|
22 |
+
21,940466,"TERMINAL",0,0,"[2A\r[2K[1B\r[2K[1B\r[2K[2A[37m⠹[0m [2mPreparing packages...[0m (4/6)\r\n[2mpre-commit [0m [32m-----------[2m-------------------[0m[0m 76.90 KiB/215.53 KiB\r\n[2mvirtualenv [0m [32m-[2m-----------------------------[0m[0m 46.88 KiB/5.78 MiB [2A\r[2K[1B\r[2K[1B\r[2K[2A[37m⠹[0m [2mPreparing packages...[0m (4/6)\r\n[2mpre-commit [0m [32m-----------[2m-------------------[0m[0m 76.90 KiB/215.53 KiB\r\n[2mvirtualenv [0m [32m-[2m-----------------------------[0m[0m 58.58 KiB/5.78 MiB [2A\r[2K[1B\r[2K[1B\r[2K[2A[37m⠹[0m [2mPreparing packages...[0m (4/6)\r\n[2mpre-commit [0m [32m----------------[2m--------------[0m[0m 108.90 KiB/215.53 KiB\r\n[2mvirtualenv [0m [32m-[2m-----------------------------[0m[0m 74.58 KiB/5.78 MiB [2A\r[2K[1B\r[2K[1B\r[2K[2A[37m⠹[0m [2mPreparing packages...[0m (4/6)\r\n[2mvirtualenv [0m [32m-[2m-----------------------------[0m[0m 92.46 KiB/5.78 MiB [1A\r[2K[1B\r[2K[1A[37m⠸[0m [2mPreparing packages...[0m (5/6)\r\n[2mvirtualenv [0m [32m-[2m-----------------------------[0m[0m 92.46 KiB/5.78 MiB [1A\r[2K[1B\r[2K[1A[37m⠸[0m [2mPreparing packages...[0m (5/6)\r\n[2mvirtualenv [0m [32m------------------[2m------------[0m[0m 3.35 MiB/5.78 MiB [1A\r[2K[1B\r[2K[1A[37m⠸[0m [2mPreparing packages...[0m (5/6)\r\n[2mvirtualenv [0m [32m------------------------------[2m[0m[0m 5.78 MiB/5.78 MiB [1A\r[2K[1B\r[2K[1A[37m���[0m [2mPreparing packages...[0m (5/6) \r[2K[2mPrepared [1m6 packages[0m [2min 525ms[0m[0m\r\n░░░░░░░░░░░░░░░░░░░░ [0/0] [2mInstalling wheels... [0m\r[2K░░░░░░░░░░░░░░░░░░░░ [0/6] [2mInstalling wheels... [0m",,terminal_output
|
23 |
+
22,940608,"TERMINAL",0,0,"\r[2K░░░░░░░░░░░░░░░░░░░░ [0/6] [2mcfgv==3.4.0 [0m\r[2K███░░░░░░░░░░░░░░░░░ [1/6] [2mcfgv==3.4.0 [0m\r[2K███░░░░░░░░░░░░░░░░░ [1/6] [2mdistlib==0.4.0 [0m\r[2K██████░░░░░░░░░░░░░░ [2/6] [2mdistlib==0.4.0 [0m\r[2K██████░░░░░░░░░░░░░░ [2/6] [2mnodeenv==1.9.1 [0m\r[2K██████████░░░░░░░░░░ [3/6] [2mnodeenv==1.9.1 [0m\r[2K██████████░░░░░░░░░░ [3/6] [2midentify==2.6.12 [0m\r[2K█████████████░░░░░░░ [4/6] [2midentify==2.6.12 [0m\r[2K█████████████░░░░░░░ [4/6] [2mpre-commit==4.2.0 [0m\r[2K████████████████░░░░ [5/6] [2mpre-commit==4.2.0 [0m\r[2K████████████████░░░░ [5/6] [2mvirtualenv==20.32.0 [0m\r[2K████████████████████ [6/6] [2mvirtualenv==20.32.0 [0m\r[2K[2mInstalled [1m6 packages[0m [2min 169ms[0m[0m\r\n [32m+[39m [1mcfgv[0m[2m==3.4.0[0m\r\n [32m+[39m [1mdistlib[0m[2m==0.4.0[0m\r\n [32m+[39m [1midentify[0m[2m==2.6.12[0m\r\n [32m+[39m [1mnodeenv[0m[2m==1.9.1[0m\r\n [32m+[39m [1mpre-commit[0m[2m==4.2.0[0m\r\n [32m+[39m [1mvirtualenv[0m[2m==20.32.0[0m\r\n]0;tum_dbd0378@hkn1990:~/jafar",,terminal_output
|
24 |
+
23,1797346,"TERMINAL",0,0,"salloc",,terminal_focus
|
25 |
+
24,1800772,"utils/nn.py",5109,0,"",python,selection_command
|
26 |
+
25,1801009,"utils/nn.py",5146,0,"",python,selection_command
|
27 |
+
26,1801038,"utils/nn.py",5175,0,"",python,selection_command
|
28 |
+
27,1801150,"utils/nn.py",5219,0,"",python,selection_command
|
29 |
+
28,1801155,"utils/nn.py",5245,0,"",python,selection_command
|
30 |
+
29,1801158,"utils/nn.py",5287,0,"",python,selection_command
|
31 |
+
30,1801171,"utils/nn.py",5317,0,"",python,selection_command
|
32 |
+
31,1801204,"utils/nn.py",5330,0,"",python,selection_command
|
33 |
+
32,1801237,"utils/nn.py",5373,0,"",python,selection_command
|
34 |
+
33,1806976,"utils/nn.py",5408,0,"",python,selection_command
|
35 |
+
34,1806979,"utils/nn.py",5446,0,"",python,selection_command
|
36 |
+
35,1806985,"utils/nn.py",5481,0,"",python,selection_command
|
37 |
+
36,1806987,"utils/nn.py",5520,0,"",python,selection_command
|
38 |
+
37,1806988,"utils/nn.py",5562,0,"",python,selection_command
|
39 |
+
38,1806990,"utils/nn.py",5592,0,"",python,selection_command
|
40 |
+
39,1806992,"utils/nn.py",5687,0,"",python,selection_command
|
41 |
+
40,1806992,"utils/nn.py",5712,0,"",python,selection_command
|
42 |
+
41,1806994,"utils/nn.py",5812,0,"",python,selection_command
|
43 |
+
42,1806995,"utils/nn.py",5825,0,"",python,selection_command
|
44 |
+
43,1806997,"utils/nn.py",5843,0,"",python,selection_command
|
45 |
+
44,1807000,"utils/nn.py",5825,0,"",python,selection_command
|
46 |
+
45,1807001,"utils/nn.py",5812,0,"",python,selection_command
|
47 |
+
46,1807002,"utils/nn.py",5712,0,"",python,selection_command
|
48 |
+
47,1807003,"utils/nn.py",5687,0,"",python,selection_command
|
49 |
+
48,1807005,"utils/nn.py",5592,0,"",python,selection_command
|
50 |
+
49,1807006,"utils/nn.py",5604,0,"",python,selection_command
|
1de052c516cab686515c107385aaf7c3a7e3e5c23c9bc3c0be0cff3df28cd64d/crowd-code-9837d221-0431-4205-9f56-6cdee8f0c0181752692224585-2025_07_16-20.57.34.901/source.csv
ADDED
The diff for this file is too large to render.
See raw diff
|
|