Muennighoff commited on
Commit
b14d48f
·
1 Parent(s): e467b8d
sbatch_4b284bc4perplexity.sh ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
#!/bin/bash
#SBATCH --nodes=32
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=40
#SBATCH --mem=256G
#SBATCH -p standard-g
#SBATCH -t 48:00:00
#SBATCH --gpus-per-node=mi250:8
#SBATCH --exclusive=user
#SBATCH --hint=nomultithread
#SBATCH --account=project_462000119
#SBATCH -o logs/%j.out
#SBATCH -e logs/%j.err

# Launch GPT pretraining (2980M params) on the c4perplexity data mix.
VARIANT=4b284bc4perplexity

# If run directly (not under sbatch), submit ourselves and exit.
# Quote + ":-" default so the test is well-formed when SLURM_JOB_ID is unset.
if [ -z "${SLURM_JOB_ID:-}" ]; then
    mkdir -p logs
    sbatch "$0"
    exit
fi

# Strict mode only after the self-submit check above, since -u would trip on
# an unset SLURM_JOB_ID when the script is run outside of Slurm.
set -euo pipefail

# Symlink logs/latest.out and logs/latest.err to this job's log files.
ln -f -s "$SLURM_JOB_ID.out" logs/latest.out
ln -f -s "$SLURM_JOB_ID.err" logs/latest.err

# Creating this file asks the trainer to checkpoint and stop gracefully.
KILL_SWITCH_PATH=kill-switch-$VARIANT
CHECKPOINT_PATH=checkpoints_$VARIANT
TENSORBOARD_PATH=tensorboard_$VARIANT

# Data. DATA_PATH is kept for reference; the actual splits are read from the
# *-weighted-split-paths-path files whose expected contents are shown below.
VOCAB_FILE="gpt2/vocab.json"
MERGE_FILE="gpt2/merges.txt"
DATA_PATH="/scratch/project_462000119/data/c4perplexity/gpt2tok_perplexity_text_document"
TRAIN_DATA_PATH=trainperplexity.txt
# "train: 1.0 0:1 /scratch/project_462000119/data/c4perplexity/gpt2tok_perplexity_text_document"
VALID_DATA_PATH=val.txt
# "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document"

PP_SIZE=1
TP_SIZE=2

MICRO_BATCH_SIZE=2
GRADIENT_ACCUMULATION_STEPS=1
WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))

# Model parameters (PARAM_2980M array is defined by model_params.sh).
source model_params.sh
MODEL_PARAM=("${PARAM_2980M[@]}")
NHIDDEN=${MODEL_PARAM[0]}
FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
KV_SIZE=${MODEL_PARAM[2]}
NHEADS=${MODEL_PARAM[3]}
NLAYERS=${MODEL_PARAM[4]}
SEQ_LEN=2048

echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"

SAVE_INTERVAL=10000

# Tokens: 84_000_000_000
# -> Samples: 41_015_625.0
# NOTE: the underscore separators are passed through verbatim; they are parsed
# by Python's int() on the trainer side (argparse), not by the shell.
TRAIN_SAMPLES=41_015_625

OPTIMIZER_ARGS=" \
    --optimizer adam \
    --adam-beta1 0.9 \
    --adam-beta2 0.95 \
    --adam-eps 1e-8 \
    --lr 2e-4 \
    --min-lr 2e-5 \
    --lr-decay-style cosine \
    --lr-decay-samples $TRAIN_SAMPLES \
    --lr-warmup-samples 410_156 \
    --clip-grad 1.0 \
    --weight-decay 1e-1 \
    "

# NOTE: --clip-grad is repeated below with the same value; harmless duplicate.
GPT_ARGS=" \
    --num-layers $NLAYERS \
    --hidden-size $NHIDDEN \
    --num-attention-heads $NHEADS \
    --kv-channels $KV_SIZE \
    --ffn-hidden-size $FFN_HIDDEN_SIZE \
    --seq-length $SEQ_LEN \
    --max-position-embeddings $SEQ_LEN \
    --micro-batch-size $MICRO_BATCH_SIZE \
    --global-batch-size $GLOBAL_BATCH_SIZE \
    --train-samples $TRAIN_SAMPLES \
    --vocab-file $VOCAB_FILE \
    --merge-file $MERGE_FILE \
    --clip-grad 1.0 \
    --kill-switch-path $KILL_SWITCH_PATH \
    --bf16 \
    $OPTIMIZER_ARGS \
    "

OUTPUT_ARGS=" \
    --log-interval 10 \
    --save-interval $SAVE_INTERVAL \
    --eval-interval 1000 \
    --eval-iters 100 \
    --tensorboard-dir $TENSORBOARD_PATH \
    --tensorboard-queue-size 5 \
    --log-timers-to-tensorboard \
    --log-batch-size-to-tensorboard \
    --log-validation-ppl-to-tensorboard \
    "

ZERO_STAGE=0

# Write a per-job DeepSpeed config so concurrent jobs never clobber each other.
mkdir -p ds_configs
DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"

cat <<EOF > "$DS_CONFIG_PATH"
{
    "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
    "train_batch_size": $GLOBAL_BATCH_SIZE,
    "gradient_clipping": 1.0,
    "zero_optimization": {
        "stage": $ZERO_STAGE
    },
    "bf16": {
        "enabled": true
    },
    "steps_per_print": 2000,
    "wall_clock_breakdown": false
}
EOF

DEEPSPEED_ARGS=" \
    --deepspeed \
    --deepspeed_config $DS_CONFIG_PATH \
    --zero-stage $ZERO_STAGE \
    "

CMD=" \
    Megatron-DeepSpeed/pretrain_gpt.py \
    --tensor-model-parallel-size $TP_SIZE \
    --pipeline-model-parallel-size $PP_SIZE \
    $GPT_ARGS \
    $OUTPUT_ARGS \
    --save $CHECKPOINT_PATH \
    --load $CHECKPOINT_PATH \
    --train-weighted-split-paths-path $TRAIN_DATA_PATH \
    --valid-weighted-split-paths-path $VALID_DATA_PATH \
    --data-impl mmap \
    $DEEPSPEED_ARGS \
    "

# Quoted so the logged command is printed exactly as it will be executed.
echo "$CMD"

echo "START $SLURM_JOBID: $(date)"

# bash launch_srun.sh $CMD
# $CMD is deliberately unquoted: launch.sh must receive the individual args.
srun --label launch.sh $CMD

echo "END $SLURM_JOBID: $(date)"
sbatch_4b284bc4perplexity25.sh ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
#!/bin/bash
#SBATCH --nodes=32
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=40
#SBATCH --mem=256G
#SBATCH -p standard-g
#SBATCH -t 48:00:00
#SBATCH --gpus-per-node=mi250:8
#SBATCH --exclusive=user
#SBATCH --hint=nomultithread
#SBATCH --account=project_462000119
#SBATCH -o logs/%j.out
#SBATCH -e logs/%j.err

# Launch GPT pretraining (2980M params) on the c4 perplexity-25 data mix.
VARIANT=4b284bc4perplexity25

# If run directly (not under sbatch), submit ourselves and exit.
# Quote + ":-" default so the test is well-formed when SLURM_JOB_ID is unset.
if [ -z "${SLURM_JOB_ID:-}" ]; then
    mkdir -p logs
    sbatch "$0"
    exit
fi

# Strict mode only after the self-submit check above, since -u would trip on
# an unset SLURM_JOB_ID when the script is run outside of Slurm.
set -euo pipefail

# Symlink logs/latest.out and logs/latest.err to this job's log files.
ln -f -s "$SLURM_JOB_ID.out" logs/latest.out
ln -f -s "$SLURM_JOB_ID.err" logs/latest.err

# Creating this file asks the trainer to checkpoint and stop gracefully.
KILL_SWITCH_PATH=kill-switch-$VARIANT
CHECKPOINT_PATH=checkpoints_$VARIANT
TENSORBOARD_PATH=tensorboard_$VARIANT

# Data. DATA_PATH is kept for reference; the actual splits are read from the
# *-weighted-split-paths-path files whose expected contents are shown below.
VOCAB_FILE="gpt2/vocab.json"
MERGE_FILE="gpt2/merges.txt"
DATA_PATH="/scratch/project_462000119/data/c4perplexity/gpt2tok_perplexity_text_document"
TRAIN_DATA_PATH=trainperplexity25.txt
# "train: 1.0 0:1 /scratch/project_462000119/data/c4perplexity/gpt2tok_c4_en_pplx_25_text_document"
VALID_DATA_PATH=val.txt
# "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document"

PP_SIZE=1
TP_SIZE=2

MICRO_BATCH_SIZE=2
GRADIENT_ACCUMULATION_STEPS=1
WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))

# Model parameters (PARAM_2980M array is defined by model_params.sh).
source model_params.sh
MODEL_PARAM=("${PARAM_2980M[@]}")
NHIDDEN=${MODEL_PARAM[0]}
FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
KV_SIZE=${MODEL_PARAM[2]}
NHEADS=${MODEL_PARAM[3]}
NLAYERS=${MODEL_PARAM[4]}
SEQ_LEN=2048

echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"

SAVE_INTERVAL=10000

# Tokens: 84_000_000_000
# -> Samples: 41_015_625.0
# NOTE: the underscore separators are passed through verbatim; they are parsed
# by Python's int() on the trainer side (argparse), not by the shell.
TRAIN_SAMPLES=41_015_625

OPTIMIZER_ARGS=" \
    --optimizer adam \
    --adam-beta1 0.9 \
    --adam-beta2 0.95 \
    --adam-eps 1e-8 \
    --lr 2e-4 \
    --min-lr 2e-5 \
    --lr-decay-style cosine \
    --lr-decay-samples $TRAIN_SAMPLES \
    --lr-warmup-samples 410_156 \
    --clip-grad 1.0 \
    --weight-decay 1e-1 \
    "

# NOTE: --clip-grad is repeated below with the same value; harmless duplicate.
GPT_ARGS=" \
    --num-layers $NLAYERS \
    --hidden-size $NHIDDEN \
    --num-attention-heads $NHEADS \
    --kv-channels $KV_SIZE \
    --ffn-hidden-size $FFN_HIDDEN_SIZE \
    --seq-length $SEQ_LEN \
    --max-position-embeddings $SEQ_LEN \
    --micro-batch-size $MICRO_BATCH_SIZE \
    --global-batch-size $GLOBAL_BATCH_SIZE \
    --train-samples $TRAIN_SAMPLES \
    --vocab-file $VOCAB_FILE \
    --merge-file $MERGE_FILE \
    --clip-grad 1.0 \
    --kill-switch-path $KILL_SWITCH_PATH \
    --bf16 \
    $OPTIMIZER_ARGS \
    "

OUTPUT_ARGS=" \
    --log-interval 10 \
    --save-interval $SAVE_INTERVAL \
    --eval-interval 1000 \
    --eval-iters 100 \
    --tensorboard-dir $TENSORBOARD_PATH \
    --tensorboard-queue-size 5 \
    --log-timers-to-tensorboard \
    --log-batch-size-to-tensorboard \
    --log-validation-ppl-to-tensorboard \
    "

ZERO_STAGE=0

# Write a per-job DeepSpeed config so concurrent jobs never clobber each other.
mkdir -p ds_configs
DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"

cat <<EOF > "$DS_CONFIG_PATH"
{
    "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
    "train_batch_size": $GLOBAL_BATCH_SIZE,
    "gradient_clipping": 1.0,
    "zero_optimization": {
        "stage": $ZERO_STAGE
    },
    "bf16": {
        "enabled": true
    },
    "steps_per_print": 2000,
    "wall_clock_breakdown": false
}
EOF

DEEPSPEED_ARGS=" \
    --deepspeed \
    --deepspeed_config $DS_CONFIG_PATH \
    --zero-stage $ZERO_STAGE \
    "

CMD=" \
    Megatron-DeepSpeed/pretrain_gpt.py \
    --tensor-model-parallel-size $TP_SIZE \
    --pipeline-model-parallel-size $PP_SIZE \
    $GPT_ARGS \
    $OUTPUT_ARGS \
    --save $CHECKPOINT_PATH \
    --load $CHECKPOINT_PATH \
    --train-weighted-split-paths-path $TRAIN_DATA_PATH \
    --valid-weighted-split-paths-path $VALID_DATA_PATH \
    --data-impl mmap \
    $DEEPSPEED_ARGS \
    "

# Quoted so the logged command is printed exactly as it will be executed.
echo "$CMD"

echo "START $SLURM_JOBID: $(date)"

# bash launch_srun.sh $CMD
# $CMD is deliberately unquoted: launch.sh must receive the individual args.
srun --label launch.sh $CMD

echo "END $SLURM_JOBID: $(date)"
sbatch_4b284bc4perplexity50.sh ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
#!/bin/bash
#SBATCH --nodes=32
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=40
#SBATCH --mem=256G
#SBATCH -p standard-g
#SBATCH -t 48:00:00
#SBATCH --gpus-per-node=mi250:8
#SBATCH --exclusive=user
#SBATCH --hint=nomultithread
#SBATCH --account=project_462000119
#SBATCH -o logs/%j.out
#SBATCH -e logs/%j.err

# Launch GPT pretraining (2980M params) on the c4 perplexity-50 data mix.
VARIANT=4b284bc4perplexity50

# If run directly (not under sbatch), submit ourselves and exit.
# Quote + ":-" default so the test is well-formed when SLURM_JOB_ID is unset.
if [ -z "${SLURM_JOB_ID:-}" ]; then
    mkdir -p logs
    sbatch "$0"
    exit
fi

# Strict mode only after the self-submit check above, since -u would trip on
# an unset SLURM_JOB_ID when the script is run outside of Slurm.
set -euo pipefail

# Symlink logs/latest.out and logs/latest.err to this job's log files.
ln -f -s "$SLURM_JOB_ID.out" logs/latest.out
ln -f -s "$SLURM_JOB_ID.err" logs/latest.err

# Creating this file asks the trainer to checkpoint and stop gracefully.
KILL_SWITCH_PATH=kill-switch-$VARIANT
CHECKPOINT_PATH=checkpoints_$VARIANT
TENSORBOARD_PATH=tensorboard_$VARIANT

# Data. DATA_PATH is kept for reference; the actual splits are read from the
# *-weighted-split-paths-path files whose expected contents are shown below.
VOCAB_FILE="gpt2/vocab.json"
MERGE_FILE="gpt2/merges.txt"
DATA_PATH="/scratch/project_462000119/data/c4perplexity/gpt2tok_perplexity_text_document"
TRAIN_DATA_PATH=trainperplexity50.txt
# "train: 1.0 0:1 /scratch/project_462000119/data/c4perplexity/gpt2tok_c4_en_pplx_50_text_document"
VALID_DATA_PATH=val.txt
# "validation: 1.0 0:1 /scratch/project_462000119/data/c4_validation/gpt2tok_c4validation_rerun_text_document"

PP_SIZE=1
TP_SIZE=2

MICRO_BATCH_SIZE=2
GRADIENT_ACCUMULATION_STEPS=1
WORLD_SIZE=$((SLURM_GPUS_ON_NODE*SLURM_JOB_NUM_NODES))
GLOBAL_BATCH_SIZE=$((MICRO_BATCH_SIZE*WORLD_SIZE*GRADIENT_ACCUMULATION_STEPS))

# Model parameters (PARAM_2980M array is defined by model_params.sh).
source model_params.sh
MODEL_PARAM=("${PARAM_2980M[@]}")
NHIDDEN=${MODEL_PARAM[0]}
FFN_HIDDEN_SIZE=${MODEL_PARAM[1]}
KV_SIZE=${MODEL_PARAM[2]}
NHEADS=${MODEL_PARAM[3]}
NLAYERS=${MODEL_PARAM[4]}
SEQ_LEN=2048

echo "Model parameters: d_model $NHIDDEN ffw_size $FFN_HIDDEN_SIZE kv_size $KV_SIZE n_heads $NHEADS n_layers $NLAYERS"

SAVE_INTERVAL=10000

# Tokens: 84_000_000_000
# -> Samples: 41_015_625.0
# NOTE: the underscore separators are passed through verbatim; they are parsed
# by Python's int() on the trainer side (argparse), not by the shell.
TRAIN_SAMPLES=41_015_625

OPTIMIZER_ARGS=" \
    --optimizer adam \
    --adam-beta1 0.9 \
    --adam-beta2 0.95 \
    --adam-eps 1e-8 \
    --lr 2e-4 \
    --min-lr 2e-5 \
    --lr-decay-style cosine \
    --lr-decay-samples $TRAIN_SAMPLES \
    --lr-warmup-samples 410_156 \
    --clip-grad 1.0 \
    --weight-decay 1e-1 \
    "

# NOTE: --clip-grad is repeated below with the same value; harmless duplicate.
GPT_ARGS=" \
    --num-layers $NLAYERS \
    --hidden-size $NHIDDEN \
    --num-attention-heads $NHEADS \
    --kv-channels $KV_SIZE \
    --ffn-hidden-size $FFN_HIDDEN_SIZE \
    --seq-length $SEQ_LEN \
    --max-position-embeddings $SEQ_LEN \
    --micro-batch-size $MICRO_BATCH_SIZE \
    --global-batch-size $GLOBAL_BATCH_SIZE \
    --train-samples $TRAIN_SAMPLES \
    --vocab-file $VOCAB_FILE \
    --merge-file $MERGE_FILE \
    --clip-grad 1.0 \
    --kill-switch-path $KILL_SWITCH_PATH \
    --bf16 \
    $OPTIMIZER_ARGS \
    "

OUTPUT_ARGS=" \
    --log-interval 10 \
    --save-interval $SAVE_INTERVAL \
    --eval-interval 1000 \
    --eval-iters 100 \
    --tensorboard-dir $TENSORBOARD_PATH \
    --tensorboard-queue-size 5 \
    --log-timers-to-tensorboard \
    --log-batch-size-to-tensorboard \
    --log-validation-ppl-to-tensorboard \
    "

ZERO_STAGE=0

# Write a per-job DeepSpeed config so concurrent jobs never clobber each other.
mkdir -p ds_configs
DS_CONFIG_PATH="ds_configs/$SLURM_JOB_ID.json"

cat <<EOF > "$DS_CONFIG_PATH"
{
    "train_micro_batch_size_per_gpu": $MICRO_BATCH_SIZE,
    "train_batch_size": $GLOBAL_BATCH_SIZE,
    "gradient_clipping": 1.0,
    "zero_optimization": {
        "stage": $ZERO_STAGE
    },
    "bf16": {
        "enabled": true
    },
    "steps_per_print": 2000,
    "wall_clock_breakdown": false
}
EOF

DEEPSPEED_ARGS=" \
    --deepspeed \
    --deepspeed_config $DS_CONFIG_PATH \
    --zero-stage $ZERO_STAGE \
    "

CMD=" \
    Megatron-DeepSpeed/pretrain_gpt.py \
    --tensor-model-parallel-size $TP_SIZE \
    --pipeline-model-parallel-size $PP_SIZE \
    $GPT_ARGS \
    $OUTPUT_ARGS \
    --save $CHECKPOINT_PATH \
    --load $CHECKPOINT_PATH \
    --train-weighted-split-paths-path $TRAIN_DATA_PATH \
    --valid-weighted-split-paths-path $VALID_DATA_PATH \
    --data-impl mmap \
    $DEEPSPEED_ARGS \
    "

# Quoted so the logged command is printed exactly as it will be executed.
echo "$CMD"

echo "START $SLURM_JOBID: $(date)"

# bash launch_srun.sh $CMD
# $CMD is deliberately unquoted: launch.sh must receive the individual args.
srun --label launch.sh $CMD

echo "END $SLURM_JOBID: $(date)"
tensorboard/tensorboard_4b284bc4perplexity25/events.out.tfevents.1675983088.nid005401.17305.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8d8ad51d2662fc24895413e38dcd9c514dfe6b0fc3ecf474882ce7c36ccde171
3
+ size 40
tensorboard/tensorboard_4b284bc4perplexity25/events.out.tfevents.1675983591.nid005695.45960.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ba73df888c7bec444c353e96219d2c90b6468f7e48cde329cf5961c7804ca406
3
+ size 40
tensorboard/tensorboard_4b284bc4perplexity25/events.out.tfevents.1675984102.nid005695.54181.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0c6c6d0f8ceb3fe6aa0d763830030d10e7ed3fc140d42c4ff4833720ff762d93
3
+ size 143659632
tensorboard/tensorboard_4b284bc4perplexity25/events.out.tfevents.1676144755.nid005695.111249.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d37e79432b7f773b71ab04e8b788cd5ea8a709903f18d2534ab7408bdc740d56
3
+ size 40
tensorboard/tensorboard_4b284bc4perplexity25/events.out.tfevents.1676145256.nid005695.119215.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e5fa5ba5d3ec5ab32f5eb2e60176ab4bfeb7acb60b5c7285d007f4e978896285
3
+ size 40
tensorboard/tensorboard_4b284bc4perplexity50/events.out.tfevents.1675975712.nid005946.123671.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dbc8cbe3c1e343f0646a8899ed0ba1bab44da4d75264e8f0b3f583b15f459be7
3
+ size 40
tensorboard/tensorboard_4b284bc4perplexity50/events.out.tfevents.1675976219.nid006070.128633.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9b10d68963f8bf0785210f9b4a8d2ded741e241c2e388f2aca7eb7b55ea20063
3
+ size 143659632
tensorboard/tensorboard_4b284bc4perplexity50/events.out.tfevents.1676137082.nid006070.59097.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:456b9314a080fce1ffe7b780981b8391f90df85592eb21e2aec34f5fb101498d
3
+ size 40
tensorboard/tensorboard_4b284bc4perplexity50/events.out.tfevents.1676137586.nid006070.64809.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c6dc614a7079a07d00c2ee252a8faa5acabca95d514be7f04a4fcf973003387f
3
+ size 40
tensorboard/tensorboard_4b284bc4perplexity50/events.out.tfevents.1676138087.nid006070.73224.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a61a6b497e46f1f6e28fb065b247136949b8d224bfc622e5daa828102a245f50
3
+ size 40