Upload folder using huggingface_hub
- data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_18-16_47_17-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/out.log +113 -0
- data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_18-16_47_17-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/params.txt +67 -0
- data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_18-16_48_21-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/out.log +113 -0
- data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_18-16_48_21-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/params.txt +67 -0
- data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_18-16_52_06-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/out.log +113 -0
- data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_18-16_52_06-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/params.txt +67 -0
- data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_19-13_26_10-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/out.log +114 -0
- data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_19-13_26_10-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/params.txt +67 -0
- data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_19-15_28_36-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/checkpoints/epoch_1.pt +3 -0
- data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_19-15_28_36-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/checkpoints/epoch_2.pt +3 -0
- data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_19-15_28_36-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/checkpoints/epoch_3.pt +3 -0
- data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_19-15_28_36-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/out.log +240 -0
- data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_19-15_28_36-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/params.txt +67 -0
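The commit title above is the default message produced by the huggingface_hub folder-upload helper. A minimal sketch of how a commit like this is typically created is shown below; the target repo id and repo type are placeholders, since neither is named in this diff, and the ~5 GB checkpoint files are stored through the Hub's Git LFS backend (which is why they appear later as three-line pointer files).

```python
# Sketch only: pushing a local results folder to the Hub with huggingface_hub.
from huggingface_hub import HfApi

api = HfApi()  # uses the token from `huggingface-cli login` by default
api.upload_folder(
    folder_path="data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled",
    path_in_repo="data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled",
    repo_id="your-username/your-repo",  # placeholder: the target repo is not named in this diff
    repo_type="model",                  # or "dataset", depending on how the repo is set up
    commit_message="Upload folder using huggingface_hub",
)
```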
data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_18-16_47_17-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/out.log
ADDED
@@ -0,0 +1,113 @@
+2024-11-18,16:47:17 | INFO | Running in distributed mode with multiple processes. Device: cuda:0.Process (global: 0, local 0), total 8.
+2024-11-18,16:47:17 | INFO | Loading ViT-L-14-336 model config.
+2024-11-18,16:47:20 | INFO | Loading pretrained ViT-L-14-336 weights (data/openclip-vit-14-336/openclip_model.pt).
+2024-11-18,16:47:28 | INFO | Model:
+2024-11-18,16:47:28 | INFO | CLIP(
+  (visual): VisualTransformer(
+    (conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14), bias=False)
+    (ln_pre): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
+    (transformer): Transformer(
+      (resblocks): ModuleList(
+        (0-23): 24 x ResidualAttentionBlock(
+          (attn): MultiheadAttention(
+            (out_proj): NonDynamicallyQuantizableLinear(in_features=1024, out_features=1024, bias=True)
+          )
+          (ln_1): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
+          (mlp): Sequential(
+            (c_fc): Linear(in_features=1024, out_features=4096, bias=True)
+            (gelu): QuickGELU()
+            (c_proj): Linear(in_features=4096, out_features=1024, bias=True)
+          )
+          (ln_2): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
+        )
+      )
+    )
+    (ln_post): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
+  )
+  (transformer): Transformer(
+    (resblocks): ModuleList(
+      (0-11): 12 x ResidualAttentionBlock(
+        (attn): MultiheadAttention(
+          (out_proj): NonDynamicallyQuantizableLinear(in_features=768, out_features=768, bias=True)
+        )
+        (ln_1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
+        (mlp): Sequential(
+          (c_fc): Linear(in_features=768, out_features=3072, bias=True)
+          (gelu): QuickGELU()
+          (c_proj): Linear(in_features=3072, out_features=768, bias=True)
+        )
+        (ln_2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
+      )
+    )
+  )
+  (token_embedding): Embedding(49408, 768)
+  (ln_final): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
+)
+2024-11-18,16:47:28 | INFO | Params:
+2024-11-18,16:47:28 | INFO | batch_size: 64
+2024-11-18,16:47:28 | INFO | beta1: 0.9
+2024-11-18,16:47:28 | INFO | beta2: 0.98
+2024-11-18,16:47:28 | INFO | checkpoint_path: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_18-16_47_17-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/checkpoints
+2024-11-18,16:47:28 | INFO | copy_codebase: False
+2024-11-18,16:47:28 | INFO | csv_caption_key: caption
+2024-11-18,16:47:28 | INFO | csv_hard_captions_key: neg_caption
+2024-11-18,16:47:28 | INFO | csv_img_key: img_path
+2024-11-18,16:47:28 | INFO | csv_separator: ,
+2024-11-18,16:47:28 | INFO | dataset_resampled: False
+2024-11-18,16:47:28 | INFO | dataset_type: csv
+2024-11-18,16:47:28 | INFO | ddp_static_graph: False
+2024-11-18,16:47:28 | INFO | debug: False
+2024-11-18,16:47:28 | INFO | device: cuda:0
+2024-11-18,16:47:28 | INFO | dist_backend: nccl
+2024-11-18,16:47:28 | INFO | dist_url: env://
+2024-11-18,16:47:28 | INFO | distributed: True
+2024-11-18,16:47:28 | INFO | epochs: 3
+2024-11-18,16:47:28 | INFO | eps: 1e-06
+2024-11-18,16:47:28 | INFO | force_quick_gelu: True
+2024-11-18,16:47:28 | INFO | gather_with_grad: False
+2024-11-18,16:47:28 | INFO | grad_checkpointing: False
+2024-11-18,16:47:28 | INFO | horovod: False
+2024-11-18,16:47:28 | INFO | imagenet_v2: None
+2024-11-18,16:47:28 | INFO | imagenet_val: None
+2024-11-18,16:47:28 | INFO | local_loss: False
+2024-11-18,16:47:28 | INFO | local_rank: 0
+2024-11-18,16:47:28 | INFO | lock_image: False
+2024-11-18,16:47:28 | INFO | lock_image_freeze_bn_stats: False
+2024-11-18,16:47:28 | INFO | lock_image_unlocked_groups: 0
+2024-11-18,16:47:28 | INFO | log_level: 20
+2024-11-18,16:47:28 | INFO | log_local: False
+2024-11-18,16:47:28 | INFO | log_path: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_18-16_47_17-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/out.log
+2024-11-18,16:47:28 | INFO | logs: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled
+2024-11-18,16:47:28 | INFO | lr: 5e-06
+2024-11-18,16:47:28 | INFO | model: ViT-L-14-336
+2024-11-18,16:47:28 | INFO | name: 2024_11_18-16_47_17-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp
+2024-11-18,16:47:28 | INFO | no_set_device_rank: False
+2024-11-18,16:47:28 | INFO | norm_gradient_clip: None
+2024-11-18,16:47:28 | INFO | precision: amp
+2024-11-18,16:47:28 | INFO | pretrained: data/openclip-vit-14-336/openclip_model.pt
+2024-11-18,16:47:28 | INFO | pretrained_image: False
+2024-11-18,16:47:28 | INFO | rank: 0
+2024-11-18,16:47:28 | INFO | report_to: wandb
+2024-11-18,16:47:28 | INFO | resume: None
+2024-11-18,16:47:28 | INFO | save_frequency: 1
+2024-11-18,16:47:28 | INFO | save_most_recent: False
+2024-11-18,16:47:28 | INFO | seed: 0
+2024-11-18,16:47:28 | INFO | skip_scheduler: False
+2024-11-18,16:47:28 | INFO | tensorboard: False
+2024-11-18,16:47:28 | INFO | tensorboard_path:
+2024-11-18,16:47:28 | INFO | torchscript: False
+2024-11-18,16:47:28 | INFO | trace: False
+2024-11-18,16:47:28 | INFO | train_data: csv_data/dvqa_qa_captions_new_sampled.csv
+2024-11-18,16:47:28 | INFO | train_num_samples: None
+2024-11-18,16:47:28 | INFO | use_bn_sync: False
+2024-11-18,16:47:28 | INFO | val_data: None
+2024-11-18,16:47:28 | INFO | val_frequency: 1
+2024-11-18,16:47:28 | INFO | val_num_samples: None
+2024-11-18,16:47:28 | INFO | wandb: True
+2024-11-18,16:47:28 | INFO | wandb_notes:
+2024-11-18,16:47:28 | INFO | wandb_project: neg-clip-dvqa_qa_captions_new_sampled
+2024-11-18,16:47:28 | INFO | warmup: 0
+2024-11-18,16:47:28 | INFO | wd: 0.1
+2024-11-18,16:47:28 | INFO | workers: 4
+2024-11-18,16:47:28 | INFO | world_size: 8
+2024-11-18,16:47:28 | INFO | zeroshot_frequency: 2
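One detail worth noting from the parameters in the log above: batch_size is the per-process batch and the run uses 8 processes, so each optimizer step sees 512 samples. The snippet below is just that arithmetic, checked against the step counter that appears later in the 2024_11_19-15_28_36 run's training log.

```python
# Effective batch size implied by the logged params
# (assumption: batch_size is per process, as in standard torch DDP training).
per_gpu_batch = 64            # batch_size
world_size = 8                # world_size
global_batch = per_gpu_batch * world_size
print(global_batch)           # 512 -> matches the "[  512/2000000 (0%)]" first step in the training log

print(2_000_000 // global_batch)  # about 3906 optimizer steps per epoch at ~2M samples/epoch
```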
data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_18-16_47_17-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/params.txt
ADDED
@@ -0,0 +1,67 @@
+batch_size: 64
+beta1: 0.9
+beta2: 0.98
+checkpoint_path: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_18-16_47_17-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/checkpoints
+copy_codebase: False
+csv_caption_key: caption
+csv_hard_captions_key: neg_caption
+csv_img_key: img_path
+csv_separator: ,
+dataset_resampled: False
+dataset_type: csv
+ddp_static_graph: False
+debug: False
+device: cuda:0
+dist_backend: nccl
+dist_url: env://
+distributed: True
+epochs: 3
+eps: 1e-06
+force_quick_gelu: True
+gather_with_grad: False
+grad_checkpointing: False
+horovod: False
+imagenet_v2: None
+imagenet_val: None
+local_loss: False
+local_rank: 0
+lock_image: False
+lock_image_freeze_bn_stats: False
+lock_image_unlocked_groups: 0
+log_level: 20
+log_local: False
+log_path: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_18-16_47_17-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/out.log
+logs: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled
+lr: 5e-06
+model: ViT-L-14-336
+name: 2024_11_18-16_47_17-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp
+no_set_device_rank: False
+norm_gradient_clip: None
+precision: amp
+pretrained: data/openclip-vit-14-336/openclip_model.pt
+pretrained_image: False
+rank: 0
+report_to: wandb
+resume: None
+save_frequency: 1
+save_most_recent: False
+seed: 0
+skip_scheduler: False
+tensorboard: False
+tensorboard_path:
+torchscript: False
+trace: False
+train_data: csv_data/dvqa_qa_captions_new_sampled.csv
+train_num_samples: None
+use_bn_sync: False
+val_data: None
+val_frequency: 1
+val_num_samples: None
+wandb: True
+wandb_notes:
+wandb_project: neg-clip-dvqa_qa_captions_new_sampled
+warmup: 0
+wd: 0.1
+workers: 4
+world_size: 8
+zeroshot_frequency: 2
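params.txt mirrors the Params block of out.log as plain "key: value" lines, one option per line, so it can be read back without a config library. A minimal, best-effort reader is sketched below (values are kept as strings; casting "None", "False", numbers, and paths is left to the caller; the file path used here is just the first run from this diff).

```python
# Best-effort reader for the params.txt files in this folder.
# Each line is "key: value"; some values are empty (e.g. wandb_notes).
from pathlib import Path

def read_params(path) -> dict:
    params = {}
    for line in Path(path).read_text().splitlines():
        if ":" not in line:
            continue                         # skip blank or malformed lines
        key, _, value = line.partition(":")  # split on the first colon only
        params[key.strip()] = value.strip()
    return params

p = read_params(
    "data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/"
    "2024_11_18-16_47_17-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/params.txt"
)
print(p["model"], p["lr"], p["batch_size"])  # ViT-L-14-336 5e-06 64
```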
data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_18-16_48_21-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/out.log
ADDED
@@ -0,0 +1,113 @@
| 1 |
+
2024-11-18,16:48:21 | INFO | Running in distributed mode with multiple processes. Device: cuda:0.Process (global: 0, local 0), total 8.
|
| 2 |
+
2024-11-18,16:48:21 | INFO | Loading ViT-L-14-336 model config.
|
| 3 |
+
2024-11-18,16:48:25 | INFO | Loading pretrained ViT-L-14-336 weights (data/openclip-vit-14-336/openclip_model.pt).
|
| 4 |
+
2024-11-18,16:48:31 | INFO | Model:
|
| 5 |
+
2024-11-18,16:48:31 | INFO | CLIP(
|
| 6 |
+
(visual): VisualTransformer(
|
| 7 |
+
(conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14), bias=False)
|
| 8 |
+
(ln_pre): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
|
| 9 |
+
(transformer): Transformer(
|
| 10 |
+
(resblocks): ModuleList(
|
| 11 |
+
(0-23): 24 x ResidualAttentionBlock(
|
| 12 |
+
(attn): MultiheadAttention(
|
| 13 |
+
(out_proj): NonDynamicallyQuantizableLinear(in_features=1024, out_features=1024, bias=True)
|
| 14 |
+
)
|
| 15 |
+
(ln_1): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
|
| 16 |
+
(mlp): Sequential(
|
| 17 |
+
(c_fc): Linear(in_features=1024, out_features=4096, bias=True)
|
| 18 |
+
(gelu): QuickGELU()
|
| 19 |
+
(c_proj): Linear(in_features=4096, out_features=1024, bias=True)
|
| 20 |
+
)
|
| 21 |
+
(ln_2): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
|
| 22 |
+
)
|
| 23 |
+
)
|
| 24 |
+
)
|
| 25 |
+
(ln_post): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
|
| 26 |
+
)
|
| 27 |
+
(transformer): Transformer(
|
| 28 |
+
(resblocks): ModuleList(
|
| 29 |
+
(0-11): 12 x ResidualAttentionBlock(
|
| 30 |
+
(attn): MultiheadAttention(
|
| 31 |
+
(out_proj): NonDynamicallyQuantizableLinear(in_features=768, out_features=768, bias=True)
|
| 32 |
+
)
|
| 33 |
+
(ln_1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
|
| 34 |
+
(mlp): Sequential(
|
| 35 |
+
(c_fc): Linear(in_features=768, out_features=3072, bias=True)
|
| 36 |
+
(gelu): QuickGELU()
|
| 37 |
+
(c_proj): Linear(in_features=3072, out_features=768, bias=True)
|
| 38 |
+
)
|
| 39 |
+
(ln_2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
|
| 40 |
+
)
|
| 41 |
+
)
|
| 42 |
+
)
|
| 43 |
+
(token_embedding): Embedding(49408, 768)
|
| 44 |
+
(ln_final): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
|
| 45 |
+
)
|
| 46 |
+
2024-11-18,16:48:31 | INFO | Params:
|
| 47 |
+
2024-11-18,16:48:31 | INFO | batch_size: 64
|
| 48 |
+
2024-11-18,16:48:31 | INFO | beta1: 0.9
|
| 49 |
+
2024-11-18,16:48:31 | INFO | beta2: 0.98
|
| 50 |
+
2024-11-18,16:48:31 | INFO | checkpoint_path: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_18-16_48_21-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/checkpoints
|
| 51 |
+
2024-11-18,16:48:31 | INFO | copy_codebase: False
|
| 52 |
+
2024-11-18,16:48:31 | INFO | csv_caption_key: caption
|
| 53 |
+
2024-11-18,16:48:31 | INFO | csv_hard_captions_key: neg_caption
|
| 54 |
+
2024-11-18,16:48:31 | INFO | csv_img_key: img_path
|
| 55 |
+
2024-11-18,16:48:31 | INFO | csv_separator: ,
|
| 56 |
+
2024-11-18,16:48:31 | INFO | dataset_resampled: False
|
| 57 |
+
2024-11-18,16:48:31 | INFO | dataset_type: csv
|
| 58 |
+
2024-11-18,16:48:31 | INFO | ddp_static_graph: False
|
| 59 |
+
2024-11-18,16:48:31 | INFO | debug: False
|
| 60 |
+
2024-11-18,16:48:31 | INFO | device: cuda:0
|
| 61 |
+
2024-11-18,16:48:31 | INFO | dist_backend: nccl
|
| 62 |
+
2024-11-18,16:48:31 | INFO | dist_url: env://
|
| 63 |
+
2024-11-18,16:48:31 | INFO | distributed: True
|
| 64 |
+
2024-11-18,16:48:31 | INFO | epochs: 3
|
| 65 |
+
2024-11-18,16:48:31 | INFO | eps: 1e-06
|
| 66 |
+
2024-11-18,16:48:31 | INFO | force_quick_gelu: True
|
| 67 |
+
2024-11-18,16:48:31 | INFO | gather_with_grad: False
|
| 68 |
+
2024-11-18,16:48:31 | INFO | grad_checkpointing: False
|
| 69 |
+
2024-11-18,16:48:31 | INFO | horovod: False
|
| 70 |
+
2024-11-18,16:48:31 | INFO | imagenet_v2: None
|
| 71 |
+
2024-11-18,16:48:31 | INFO | imagenet_val: None
|
| 72 |
+
2024-11-18,16:48:31 | INFO | local_loss: False
|
| 73 |
+
2024-11-18,16:48:31 | INFO | local_rank: 0
|
| 74 |
+
2024-11-18,16:48:31 | INFO | lock_image: False
|
| 75 |
+
2024-11-18,16:48:31 | INFO | lock_image_freeze_bn_stats: False
|
| 76 |
+
2024-11-18,16:48:31 | INFO | lock_image_unlocked_groups: 0
|
| 77 |
+
2024-11-18,16:48:31 | INFO | log_level: 20
|
| 78 |
+
2024-11-18,16:48:31 | INFO | log_local: False
|
| 79 |
+
2024-11-18,16:48:31 | INFO | log_path: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_18-16_48_21-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/out.log
|
| 80 |
+
2024-11-18,16:48:31 | INFO | logs: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled
|
| 81 |
+
2024-11-18,16:48:31 | INFO | lr: 5e-06
|
| 82 |
+
2024-11-18,16:48:31 | INFO | model: ViT-L-14-336
|
| 83 |
+
2024-11-18,16:48:31 | INFO | name: 2024_11_18-16_48_21-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp
|
| 84 |
+
2024-11-18,16:48:31 | INFO | no_set_device_rank: False
|
| 85 |
+
2024-11-18,16:48:31 | INFO | norm_gradient_clip: None
|
| 86 |
+
2024-11-18,16:48:31 | INFO | precision: amp
|
| 87 |
+
2024-11-18,16:48:31 | INFO | pretrained: data/openclip-vit-14-336/openclip_model.pt
|
| 88 |
+
2024-11-18,16:48:31 | INFO | pretrained_image: False
|
| 89 |
+
2024-11-18,16:48:31 | INFO | rank: 0
|
| 90 |
+
2024-11-18,16:48:31 | INFO | report_to: wandb
|
| 91 |
+
2024-11-18,16:48:31 | INFO | resume: None
|
| 92 |
+
2024-11-18,16:48:31 | INFO | save_frequency: 1
|
| 93 |
+
2024-11-18,16:48:31 | INFO | save_most_recent: False
|
| 94 |
+
2024-11-18,16:48:31 | INFO | seed: 0
|
| 95 |
+
2024-11-18,16:48:31 | INFO | skip_scheduler: False
|
| 96 |
+
2024-11-18,16:48:31 | INFO | tensorboard: False
|
| 97 |
+
2024-11-18,16:48:31 | INFO | tensorboard_path:
|
| 98 |
+
2024-11-18,16:48:31 | INFO | torchscript: False
|
| 99 |
+
2024-11-18,16:48:31 | INFO | trace: False
|
| 100 |
+
2024-11-18,16:48:31 | INFO | train_data: csv_data/dvqa_qa_captions_new_sampled.csv
|
| 101 |
+
2024-11-18,16:48:31 | INFO | train_num_samples: None
|
| 102 |
+
2024-11-18,16:48:31 | INFO | use_bn_sync: False
|
| 103 |
+
2024-11-18,16:48:31 | INFO | val_data: None
|
| 104 |
+
2024-11-18,16:48:31 | INFO | val_frequency: 1
|
| 105 |
+
2024-11-18,16:48:31 | INFO | val_num_samples: None
|
| 106 |
+
2024-11-18,16:48:31 | INFO | wandb: True
|
| 107 |
+
2024-11-18,16:48:31 | INFO | wandb_notes:
|
| 108 |
+
2024-11-18,16:48:31 | INFO | wandb_project: neg-clip-dvqa_qa_captions_new_sampled
|
| 109 |
+
2024-11-18,16:48:31 | INFO | warmup: 0
|
| 110 |
+
2024-11-18,16:48:31 | INFO | wd: 0.1
|
| 111 |
+
2024-11-18,16:48:31 | INFO | workers: 4
|
| 112 |
+
2024-11-18,16:48:31 | INFO | world_size: 8
|
| 113 |
+
2024-11-18,16:48:31 | INFO | zeroshot_frequency: 2
|
data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_18-16_48_21-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/params.txt
ADDED
@@ -0,0 +1,67 @@
| 1 |
+
batch_size: 64
|
| 2 |
+
beta1: 0.9
|
| 3 |
+
beta2: 0.98
|
| 4 |
+
checkpoint_path: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_18-16_48_21-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/checkpoints
|
| 5 |
+
copy_codebase: False
|
| 6 |
+
csv_caption_key: caption
|
| 7 |
+
csv_hard_captions_key: neg_caption
|
| 8 |
+
csv_img_key: img_path
|
| 9 |
+
csv_separator: ,
|
| 10 |
+
dataset_resampled: False
|
| 11 |
+
dataset_type: csv
|
| 12 |
+
ddp_static_graph: False
|
| 13 |
+
debug: False
|
| 14 |
+
device: cuda:0
|
| 15 |
+
dist_backend: nccl
|
| 16 |
+
dist_url: env://
|
| 17 |
+
distributed: True
|
| 18 |
+
epochs: 3
|
| 19 |
+
eps: 1e-06
|
| 20 |
+
force_quick_gelu: True
|
| 21 |
+
gather_with_grad: False
|
| 22 |
+
grad_checkpointing: False
|
| 23 |
+
horovod: False
|
| 24 |
+
imagenet_v2: None
|
| 25 |
+
imagenet_val: None
|
| 26 |
+
local_loss: False
|
| 27 |
+
local_rank: 0
|
| 28 |
+
lock_image: False
|
| 29 |
+
lock_image_freeze_bn_stats: False
|
| 30 |
+
lock_image_unlocked_groups: 0
|
| 31 |
+
log_level: 20
|
| 32 |
+
log_local: False
|
| 33 |
+
log_path: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_18-16_48_21-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/out.log
|
| 34 |
+
logs: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled
|
| 35 |
+
lr: 5e-06
|
| 36 |
+
model: ViT-L-14-336
|
| 37 |
+
name: 2024_11_18-16_48_21-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp
|
| 38 |
+
no_set_device_rank: False
|
| 39 |
+
norm_gradient_clip: None
|
| 40 |
+
precision: amp
|
| 41 |
+
pretrained: data/openclip-vit-14-336/openclip_model.pt
|
| 42 |
+
pretrained_image: False
|
| 43 |
+
rank: 0
|
| 44 |
+
report_to: wandb
|
| 45 |
+
resume: None
|
| 46 |
+
save_frequency: 1
|
| 47 |
+
save_most_recent: False
|
| 48 |
+
seed: 0
|
| 49 |
+
skip_scheduler: False
|
| 50 |
+
tensorboard: False
|
| 51 |
+
tensorboard_path:
|
| 52 |
+
torchscript: False
|
| 53 |
+
trace: False
|
| 54 |
+
train_data: csv_data/dvqa_qa_captions_new_sampled.csv
|
| 55 |
+
train_num_samples: None
|
| 56 |
+
use_bn_sync: False
|
| 57 |
+
val_data: None
|
| 58 |
+
val_frequency: 1
|
| 59 |
+
val_num_samples: None
|
| 60 |
+
wandb: True
|
| 61 |
+
wandb_notes:
|
| 62 |
+
wandb_project: neg-clip-dvqa_qa_captions_new_sampled
|
| 63 |
+
warmup: 0
|
| 64 |
+
wd: 0.1
|
| 65 |
+
workers: 4
|
| 66 |
+
world_size: 8
|
| 67 |
+
zeroshot_frequency: 2
|
data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_18-16_52_06-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/out.log
ADDED
@@ -0,0 +1,113 @@
| 1 |
+
2024-11-18,16:52:06 | INFO | Running in distributed mode with multiple processes. Device: cuda:0.Process (global: 0, local 0), total 8.
|
| 2 |
+
2024-11-18,16:52:06 | INFO | Loading ViT-L-14-336 model config.
|
| 3 |
+
2024-11-18,16:52:10 | INFO | Loading pretrained ViT-L-14-336 weights (data/openclip-vit-14-336/openclip_model.pt).
|
| 4 |
+
2024-11-18,16:52:19 | INFO | Model:
|
| 5 |
+
2024-11-18,16:52:19 | INFO | CLIP(
|
| 6 |
+
(visual): VisualTransformer(
|
| 7 |
+
(conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14), bias=False)
|
| 8 |
+
(ln_pre): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
|
| 9 |
+
(transformer): Transformer(
|
| 10 |
+
(resblocks): ModuleList(
|
| 11 |
+
(0-23): 24 x ResidualAttentionBlock(
|
| 12 |
+
(attn): MultiheadAttention(
|
| 13 |
+
(out_proj): NonDynamicallyQuantizableLinear(in_features=1024, out_features=1024, bias=True)
|
| 14 |
+
)
|
| 15 |
+
(ln_1): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
|
| 16 |
+
(mlp): Sequential(
|
| 17 |
+
(c_fc): Linear(in_features=1024, out_features=4096, bias=True)
|
| 18 |
+
(gelu): QuickGELU()
|
| 19 |
+
(c_proj): Linear(in_features=4096, out_features=1024, bias=True)
|
| 20 |
+
)
|
| 21 |
+
(ln_2): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
|
| 22 |
+
)
|
| 23 |
+
)
|
| 24 |
+
)
|
| 25 |
+
(ln_post): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
|
| 26 |
+
)
|
| 27 |
+
(transformer): Transformer(
|
| 28 |
+
(resblocks): ModuleList(
|
| 29 |
+
(0-11): 12 x ResidualAttentionBlock(
|
| 30 |
+
(attn): MultiheadAttention(
|
| 31 |
+
(out_proj): NonDynamicallyQuantizableLinear(in_features=768, out_features=768, bias=True)
|
| 32 |
+
)
|
| 33 |
+
(ln_1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
|
| 34 |
+
(mlp): Sequential(
|
| 35 |
+
(c_fc): Linear(in_features=768, out_features=3072, bias=True)
|
| 36 |
+
(gelu): QuickGELU()
|
| 37 |
+
(c_proj): Linear(in_features=3072, out_features=768, bias=True)
|
| 38 |
+
)
|
| 39 |
+
(ln_2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
|
| 40 |
+
)
|
| 41 |
+
)
|
| 42 |
+
)
|
| 43 |
+
(token_embedding): Embedding(49408, 768)
|
| 44 |
+
(ln_final): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
|
| 45 |
+
)
|
| 46 |
+
2024-11-18,16:52:19 | INFO | Params:
|
| 47 |
+
2024-11-18,16:52:19 | INFO | batch_size: 64
|
| 48 |
+
2024-11-18,16:52:19 | INFO | beta1: 0.9
|
| 49 |
+
2024-11-18,16:52:19 | INFO | beta2: 0.98
|
| 50 |
+
2024-11-18,16:52:19 | INFO | checkpoint_path: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_18-16_52_06-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/checkpoints
|
| 51 |
+
2024-11-18,16:52:19 | INFO | copy_codebase: False
|
| 52 |
+
2024-11-18,16:52:19 | INFO | csv_caption_key: caption
|
| 53 |
+
2024-11-18,16:52:19 | INFO | csv_hard_captions_key: neg_caption
|
| 54 |
+
2024-11-18,16:52:19 | INFO | csv_img_key: img_path
|
| 55 |
+
2024-11-18,16:52:19 | INFO | csv_separator: ,
|
| 56 |
+
2024-11-18,16:52:19 | INFO | dataset_resampled: False
|
| 57 |
+
2024-11-18,16:52:19 | INFO | dataset_type: csv
|
| 58 |
+
2024-11-18,16:52:19 | INFO | ddp_static_graph: False
|
| 59 |
+
2024-11-18,16:52:19 | INFO | debug: False
|
| 60 |
+
2024-11-18,16:52:19 | INFO | device: cuda:0
|
| 61 |
+
2024-11-18,16:52:19 | INFO | dist_backend: nccl
|
| 62 |
+
2024-11-18,16:52:19 | INFO | dist_url: env://
|
| 63 |
+
2024-11-18,16:52:19 | INFO | distributed: True
|
| 64 |
+
2024-11-18,16:52:19 | INFO | epochs: 3
|
| 65 |
+
2024-11-18,16:52:19 | INFO | eps: 1e-06
|
| 66 |
+
2024-11-18,16:52:19 | INFO | force_quick_gelu: True
|
| 67 |
+
2024-11-18,16:52:19 | INFO | gather_with_grad: False
|
| 68 |
+
2024-11-18,16:52:19 | INFO | grad_checkpointing: False
|
| 69 |
+
2024-11-18,16:52:19 | INFO | horovod: False
|
| 70 |
+
2024-11-18,16:52:19 | INFO | imagenet_v2: None
|
| 71 |
+
2024-11-18,16:52:19 | INFO | imagenet_val: None
|
| 72 |
+
2024-11-18,16:52:19 | INFO | local_loss: False
|
| 73 |
+
2024-11-18,16:52:19 | INFO | local_rank: 0
|
| 74 |
+
2024-11-18,16:52:19 | INFO | lock_image: False
|
| 75 |
+
2024-11-18,16:52:19 | INFO | lock_image_freeze_bn_stats: False
|
| 76 |
+
2024-11-18,16:52:19 | INFO | lock_image_unlocked_groups: 0
|
| 77 |
+
2024-11-18,16:52:19 | INFO | log_level: 20
|
| 78 |
+
2024-11-18,16:52:19 | INFO | log_local: False
|
| 79 |
+
2024-11-18,16:52:19 | INFO | log_path: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_18-16_52_06-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/out.log
|
| 80 |
+
2024-11-18,16:52:19 | INFO | logs: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled
|
| 81 |
+
2024-11-18,16:52:19 | INFO | lr: 5e-06
|
| 82 |
+
2024-11-18,16:52:19 | INFO | model: ViT-L-14-336
|
| 83 |
+
2024-11-18,16:52:19 | INFO | name: 2024_11_18-16_52_06-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp
|
| 84 |
+
2024-11-18,16:52:19 | INFO | no_set_device_rank: False
|
| 85 |
+
2024-11-18,16:52:19 | INFO | norm_gradient_clip: None
|
| 86 |
+
2024-11-18,16:52:19 | INFO | precision: amp
|
| 87 |
+
2024-11-18,16:52:19 | INFO | pretrained: data/openclip-vit-14-336/openclip_model.pt
|
| 88 |
+
2024-11-18,16:52:19 | INFO | pretrained_image: False
|
| 89 |
+
2024-11-18,16:52:19 | INFO | rank: 0
|
| 90 |
+
2024-11-18,16:52:19 | INFO | report_to: wandb
|
| 91 |
+
2024-11-18,16:52:19 | INFO | resume: None
|
| 92 |
+
2024-11-18,16:52:19 | INFO | save_frequency: 1
|
| 93 |
+
2024-11-18,16:52:19 | INFO | save_most_recent: False
|
| 94 |
+
2024-11-18,16:52:19 | INFO | seed: 0
|
| 95 |
+
2024-11-18,16:52:19 | INFO | skip_scheduler: False
|
| 96 |
+
2024-11-18,16:52:19 | INFO | tensorboard: False
|
| 97 |
+
2024-11-18,16:52:19 | INFO | tensorboard_path:
|
| 98 |
+
2024-11-18,16:52:19 | INFO | torchscript: False
|
| 99 |
+
2024-11-18,16:52:19 | INFO | trace: False
|
| 100 |
+
2024-11-18,16:52:19 | INFO | train_data: csv_data/dvqa_qa_captions_new_sampled.csv
|
| 101 |
+
2024-11-18,16:52:19 | INFO | train_num_samples: None
|
| 102 |
+
2024-11-18,16:52:19 | INFO | use_bn_sync: False
|
| 103 |
+
2024-11-18,16:52:19 | INFO | val_data: None
|
| 104 |
+
2024-11-18,16:52:19 | INFO | val_frequency: 1
|
| 105 |
+
2024-11-18,16:52:19 | INFO | val_num_samples: None
|
| 106 |
+
2024-11-18,16:52:19 | INFO | wandb: True
|
| 107 |
+
2024-11-18,16:52:19 | INFO | wandb_notes:
|
| 108 |
+
2024-11-18,16:52:19 | INFO | wandb_project: neg-clip-dvqa_qa_captions_new_sampled
|
| 109 |
+
2024-11-18,16:52:19 | INFO | warmup: 0
|
| 110 |
+
2024-11-18,16:52:19 | INFO | wd: 0.1
|
| 111 |
+
2024-11-18,16:52:19 | INFO | workers: 4
|
| 112 |
+
2024-11-18,16:52:19 | INFO | world_size: 8
|
| 113 |
+
2024-11-18,16:52:19 | INFO | zeroshot_frequency: 2
|
data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_18-16_52_06-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/params.txt
ADDED
@@ -0,0 +1,67 @@
| 1 |
+
batch_size: 64
|
| 2 |
+
beta1: 0.9
|
| 3 |
+
beta2: 0.98
|
| 4 |
+
checkpoint_path: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_18-16_52_06-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/checkpoints
|
| 5 |
+
copy_codebase: False
|
| 6 |
+
csv_caption_key: caption
|
| 7 |
+
csv_hard_captions_key: neg_caption
|
| 8 |
+
csv_img_key: img_path
|
| 9 |
+
csv_separator: ,
|
| 10 |
+
dataset_resampled: False
|
| 11 |
+
dataset_type: csv
|
| 12 |
+
ddp_static_graph: False
|
| 13 |
+
debug: False
|
| 14 |
+
device: cuda:0
|
| 15 |
+
dist_backend: nccl
|
| 16 |
+
dist_url: env://
|
| 17 |
+
distributed: True
|
| 18 |
+
epochs: 3
|
| 19 |
+
eps: 1e-06
|
| 20 |
+
force_quick_gelu: True
|
| 21 |
+
gather_with_grad: False
|
| 22 |
+
grad_checkpointing: False
|
| 23 |
+
horovod: False
|
| 24 |
+
imagenet_v2: None
|
| 25 |
+
imagenet_val: None
|
| 26 |
+
local_loss: False
|
| 27 |
+
local_rank: 0
|
| 28 |
+
lock_image: False
|
| 29 |
+
lock_image_freeze_bn_stats: False
|
| 30 |
+
lock_image_unlocked_groups: 0
|
| 31 |
+
log_level: 20
|
| 32 |
+
log_local: False
|
| 33 |
+
log_path: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_18-16_52_06-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/out.log
|
| 34 |
+
logs: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled
|
| 35 |
+
lr: 5e-06
|
| 36 |
+
model: ViT-L-14-336
|
| 37 |
+
name: 2024_11_18-16_52_06-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp
|
| 38 |
+
no_set_device_rank: False
|
| 39 |
+
norm_gradient_clip: None
|
| 40 |
+
precision: amp
|
| 41 |
+
pretrained: data/openclip-vit-14-336/openclip_model.pt
|
| 42 |
+
pretrained_image: False
|
| 43 |
+
rank: 0
|
| 44 |
+
report_to: wandb
|
| 45 |
+
resume: None
|
| 46 |
+
save_frequency: 1
|
| 47 |
+
save_most_recent: False
|
| 48 |
+
seed: 0
|
| 49 |
+
skip_scheduler: False
|
| 50 |
+
tensorboard: False
|
| 51 |
+
tensorboard_path:
|
| 52 |
+
torchscript: False
|
| 53 |
+
trace: False
|
| 54 |
+
train_data: csv_data/dvqa_qa_captions_new_sampled.csv
|
| 55 |
+
train_num_samples: None
|
| 56 |
+
use_bn_sync: False
|
| 57 |
+
val_data: None
|
| 58 |
+
val_frequency: 1
|
| 59 |
+
val_num_samples: None
|
| 60 |
+
wandb: True
|
| 61 |
+
wandb_notes:
|
| 62 |
+
wandb_project: neg-clip-dvqa_qa_captions_new_sampled
|
| 63 |
+
warmup: 0
|
| 64 |
+
wd: 0.1
|
| 65 |
+
workers: 4
|
| 66 |
+
world_size: 8
|
| 67 |
+
zeroshot_frequency: 2
|
data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_19-13_26_10-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/out.log
ADDED
@@ -0,0 +1,114 @@
| 1 |
+
2024-11-19,13:26:10 | INFO | Running in distributed mode with multiple processes. Device: cuda:0.Process (global: 0, local 0), total 8.
|
| 2 |
+
2024-11-19,13:26:10 | INFO | Loading ViT-L-14-336 model config.
|
| 3 |
+
2024-11-19,13:26:13 | INFO | Loading pretrained ViT-L-14-336 weights (data/openclip-vit-14-336/openclip_model.pt).
|
| 4 |
+
2024-11-19,13:26:20 | INFO | Model:
|
| 5 |
+
2024-11-19,13:26:20 | INFO | CLIP(
|
| 6 |
+
(visual): VisualTransformer(
|
| 7 |
+
(conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14), bias=False)
|
| 8 |
+
(ln_pre): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
|
| 9 |
+
(transformer): Transformer(
|
| 10 |
+
(resblocks): ModuleList(
|
| 11 |
+
(0-23): 24 x ResidualAttentionBlock(
|
| 12 |
+
(attn): MultiheadAttention(
|
| 13 |
+
(out_proj): NonDynamicallyQuantizableLinear(in_features=1024, out_features=1024, bias=True)
|
| 14 |
+
)
|
| 15 |
+
(ln_1): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
|
| 16 |
+
(mlp): Sequential(
|
| 17 |
+
(c_fc): Linear(in_features=1024, out_features=4096, bias=True)
|
| 18 |
+
(gelu): QuickGELU()
|
| 19 |
+
(c_proj): Linear(in_features=4096, out_features=1024, bias=True)
|
| 20 |
+
)
|
| 21 |
+
(ln_2): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
|
| 22 |
+
)
|
| 23 |
+
)
|
| 24 |
+
)
|
| 25 |
+
(ln_post): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
|
| 26 |
+
)
|
| 27 |
+
(transformer): Transformer(
|
| 28 |
+
(resblocks): ModuleList(
|
| 29 |
+
(0-11): 12 x ResidualAttentionBlock(
|
| 30 |
+
(attn): MultiheadAttention(
|
| 31 |
+
(out_proj): NonDynamicallyQuantizableLinear(in_features=768, out_features=768, bias=True)
|
| 32 |
+
)
|
| 33 |
+
(ln_1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
|
| 34 |
+
(mlp): Sequential(
|
| 35 |
+
(c_fc): Linear(in_features=768, out_features=3072, bias=True)
|
| 36 |
+
(gelu): QuickGELU()
|
| 37 |
+
(c_proj): Linear(in_features=3072, out_features=768, bias=True)
|
| 38 |
+
)
|
| 39 |
+
(ln_2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
|
| 40 |
+
)
|
| 41 |
+
)
|
| 42 |
+
)
|
| 43 |
+
(token_embedding): Embedding(49408, 768)
|
| 44 |
+
(ln_final): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
|
| 45 |
+
)
|
| 46 |
+
2024-11-19,13:26:20 | INFO | Params:
|
| 47 |
+
2024-11-19,13:26:20 | INFO | batch_size: 64
|
| 48 |
+
2024-11-19,13:26:20 | INFO | beta1: 0.9
|
| 49 |
+
2024-11-19,13:26:20 | INFO | beta2: 0.98
|
| 50 |
+
2024-11-19,13:26:20 | INFO | checkpoint_path: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_19-13_26_10-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/checkpoints
|
| 51 |
+
2024-11-19,13:26:20 | INFO | copy_codebase: False
|
| 52 |
+
2024-11-19,13:26:20 | INFO | csv_caption_key: caption
|
| 53 |
+
2024-11-19,13:26:20 | INFO | csv_hard_captions_key: neg_caption
|
| 54 |
+
2024-11-19,13:26:20 | INFO | csv_img_key: img_path
|
| 55 |
+
2024-11-19,13:26:20 | INFO | csv_separator: ,
|
| 56 |
+
2024-11-19,13:26:20 | INFO | dataset_resampled: False
|
| 57 |
+
2024-11-19,13:26:20 | INFO | dataset_type: csv
|
| 58 |
+
2024-11-19,13:26:20 | INFO | ddp_static_graph: False
|
| 59 |
+
2024-11-19,13:26:20 | INFO | debug: False
|
| 60 |
+
2024-11-19,13:26:20 | INFO | device: cuda:0
|
| 61 |
+
2024-11-19,13:26:20 | INFO | dist_backend: nccl
|
| 62 |
+
2024-11-19,13:26:20 | INFO | dist_url: env://
|
| 63 |
+
2024-11-19,13:26:20 | INFO | distributed: True
|
| 64 |
+
2024-11-19,13:26:20 | INFO | epochs: 3
|
| 65 |
+
2024-11-19,13:26:20 | INFO | eps: 1e-06
|
| 66 |
+
2024-11-19,13:26:20 | INFO | force_quick_gelu: True
|
| 67 |
+
2024-11-19,13:26:20 | INFO | gather_with_grad: False
|
| 68 |
+
2024-11-19,13:26:20 | INFO | grad_checkpointing: False
|
| 69 |
+
2024-11-19,13:26:20 | INFO | horovod: False
|
| 70 |
+
2024-11-19,13:26:20 | INFO | imagenet_v2: None
|
| 71 |
+
2024-11-19,13:26:20 | INFO | imagenet_val: None
|
| 72 |
+
2024-11-19,13:26:20 | INFO | local_loss: False
|
| 73 |
+
2024-11-19,13:26:20 | INFO | local_rank: 0
|
| 74 |
+
2024-11-19,13:26:20 | INFO | lock_image: False
|
| 75 |
+
2024-11-19,13:26:20 | INFO | lock_image_freeze_bn_stats: False
|
| 76 |
+
2024-11-19,13:26:20 | INFO | lock_image_unlocked_groups: 0
|
| 77 |
+
2024-11-19,13:26:20 | INFO | log_level: 20
|
| 78 |
+
2024-11-19,13:26:20 | INFO | log_local: False
|
| 79 |
+
2024-11-19,13:26:20 | INFO | log_path: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_19-13_26_10-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/out.log
|
| 80 |
+
2024-11-19,13:26:20 | INFO | logs: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled
|
| 81 |
+
2024-11-19,13:26:20 | INFO | lr: 5e-06
|
| 82 |
+
2024-11-19,13:26:20 | INFO | model: ViT-L-14-336
|
| 83 |
+
2024-11-19,13:26:20 | INFO | name: 2024_11_19-13_26_10-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp
|
| 84 |
+
2024-11-19,13:26:20 | INFO | no_set_device_rank: False
|
| 85 |
+
2024-11-19,13:26:20 | INFO | norm_gradient_clip: None
|
| 86 |
+
2024-11-19,13:26:20 | INFO | precision: amp
|
| 87 |
+
2024-11-19,13:26:20 | INFO | pretrained: data/openclip-vit-14-336/openclip_model.pt
|
| 88 |
+
2024-11-19,13:26:20 | INFO | pretrained_image: False
|
| 89 |
+
2024-11-19,13:26:20 | INFO | rank: 0
|
| 90 |
+
2024-11-19,13:26:20 | INFO | report_to: wandb
|
| 91 |
+
2024-11-19,13:26:20 | INFO | resume: None
|
| 92 |
+
2024-11-19,13:26:20 | INFO | save_frequency: 1
|
| 93 |
+
2024-11-19,13:26:20 | INFO | save_most_recent: False
|
| 94 |
+
2024-11-19,13:26:20 | INFO | seed: 0
|
| 95 |
+
2024-11-19,13:26:20 | INFO | skip_scheduler: False
|
| 96 |
+
2024-11-19,13:26:20 | INFO | tensorboard: False
|
| 97 |
+
2024-11-19,13:26:20 | INFO | tensorboard_path:
|
| 98 |
+
2024-11-19,13:26:20 | INFO | torchscript: False
|
| 99 |
+
2024-11-19,13:26:20 | INFO | trace: False
|
| 100 |
+
2024-11-19,13:26:20 | INFO | train_data: csv_data/dvqa_qa_captions_new_sampled.csv
|
| 101 |
+
2024-11-19,13:26:20 | INFO | train_num_samples: None
|
| 102 |
+
2024-11-19,13:26:20 | INFO | use_bn_sync: False
|
| 103 |
+
2024-11-19,13:26:20 | INFO | val_data: None
|
| 104 |
+
2024-11-19,13:26:20 | INFO | val_frequency: 1
|
| 105 |
+
2024-11-19,13:26:20 | INFO | val_num_samples: None
|
| 106 |
+
2024-11-19,13:26:20 | INFO | wandb: True
|
| 107 |
+
2024-11-19,13:26:20 | INFO | wandb_notes:
|
| 108 |
+
2024-11-19,13:26:20 | INFO | wandb_project: neg-clip-dvqa_qa_captions_new_sampled
|
| 109 |
+
2024-11-19,13:26:20 | INFO | warmup: 0
|
| 110 |
+
2024-11-19,13:26:20 | INFO | wd: 0.1
|
| 111 |
+
2024-11-19,13:26:20 | INFO | workers: 4
|
| 112 |
+
2024-11-19,13:26:20 | INFO | world_size: 8
|
| 113 |
+
2024-11-19,13:26:20 | INFO | zeroshot_frequency: 2
|
| 114 |
+
2024-11-19,13:26:28 | INFO | wrong parsering the python class
|
data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_19-13_26_10-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/params.txt
ADDED
@@ -0,0 +1,67 @@
| 1 |
+
batch_size: 64
|
| 2 |
+
beta1: 0.9
|
| 3 |
+
beta2: 0.98
|
| 4 |
+
checkpoint_path: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_19-13_26_10-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/checkpoints
|
| 5 |
+
copy_codebase: False
|
| 6 |
+
csv_caption_key: caption
|
| 7 |
+
csv_hard_captions_key: neg_caption
|
| 8 |
+
csv_img_key: img_path
|
| 9 |
+
csv_separator: ,
|
| 10 |
+
dataset_resampled: False
|
| 11 |
+
dataset_type: csv
|
| 12 |
+
ddp_static_graph: False
|
| 13 |
+
debug: False
|
| 14 |
+
device: cuda:0
|
| 15 |
+
dist_backend: nccl
|
| 16 |
+
dist_url: env://
|
| 17 |
+
distributed: True
|
| 18 |
+
epochs: 3
|
| 19 |
+
eps: 1e-06
|
| 20 |
+
force_quick_gelu: True
|
| 21 |
+
gather_with_grad: False
|
| 22 |
+
grad_checkpointing: False
|
| 23 |
+
horovod: False
|
| 24 |
+
imagenet_v2: None
|
| 25 |
+
imagenet_val: None
|
| 26 |
+
local_loss: False
|
| 27 |
+
local_rank: 0
|
| 28 |
+
lock_image: False
|
| 29 |
+
lock_image_freeze_bn_stats: False
|
| 30 |
+
lock_image_unlocked_groups: 0
|
| 31 |
+
log_level: 20
|
| 32 |
+
log_local: False
|
| 33 |
+
log_path: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_19-13_26_10-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/out.log
|
| 34 |
+
logs: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled
|
| 35 |
+
lr: 5e-06
|
| 36 |
+
model: ViT-L-14-336
|
| 37 |
+
name: 2024_11_19-13_26_10-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp
|
| 38 |
+
no_set_device_rank: False
|
| 39 |
+
norm_gradient_clip: None
|
| 40 |
+
precision: amp
|
| 41 |
+
pretrained: data/openclip-vit-14-336/openclip_model.pt
|
| 42 |
+
pretrained_image: False
|
| 43 |
+
rank: 0
|
| 44 |
+
report_to: wandb
|
| 45 |
+
resume: None
|
| 46 |
+
save_frequency: 1
|
| 47 |
+
save_most_recent: False
|
| 48 |
+
seed: 0
|
| 49 |
+
skip_scheduler: False
|
| 50 |
+
tensorboard: False
|
| 51 |
+
tensorboard_path:
|
| 52 |
+
torchscript: False
|
| 53 |
+
trace: False
|
| 54 |
+
train_data: csv_data/dvqa_qa_captions_new_sampled.csv
|
| 55 |
+
train_num_samples: None
|
| 56 |
+
use_bn_sync: False
|
| 57 |
+
val_data: None
|
| 58 |
+
val_frequency: 1
|
| 59 |
+
val_num_samples: None
|
| 60 |
+
wandb: True
|
| 61 |
+
wandb_notes:
|
| 62 |
+
wandb_project: neg-clip-dvqa_qa_captions_new_sampled
|
| 63 |
+
warmup: 0
|
| 64 |
+
wd: 0.1
|
| 65 |
+
workers: 4
|
| 66 |
+
world_size: 8
|
| 67 |
+
zeroshot_frequency: 2
|
data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_19-15_28_36-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/checkpoints/epoch_1.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1decf3db941e392cdd3b2e3cab3fb44171bede8cf93da26572e4f8d7e791a771
+size 5135890710
data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_19-15_28_36-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/checkpoints/epoch_2.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ece5ac6ad7d70789e213f8dead83a224ec7a1fddf8740733193e9267c6c6a389
+size 5135890710
data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_19-15_28_36-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/checkpoints/epoch_3.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:43655e9d4a232f310e156a8ffc265e3528050162841f73a55e90f86cfa7ce294
+size 5135890710
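The three-line epoch_N.pt entries above are Git LFS pointer files; the actual ~5.1 GB checkpoints live in LFS storage (the size suggests optimizer state is saved alongside the ViT-L/14-336 weights). Assuming the files follow the usual open_clip training-checkpoint layout (a dict with a "state_dict" whose keys carry a "module." DDP prefix), a hedged loading sketch looks like this; the relative path is a placeholder for wherever the LFS blob was downloaded.

```python
# Sketch: restoring one of the epoch_N.pt checkpoints into an open_clip model.
# Assumes the standard open_clip training-checkpoint layout and that the real
# checkpoint (not the LFS pointer) has been fetched locally.
import torch
import open_clip

model, _, preprocess = open_clip.create_model_and_transforms(
    "ViT-L-14-336",
    pretrained=None,
    force_quick_gelu=True,  # matches force_quick_gelu: True in params.txt
)

ckpt = torch.load("checkpoints/epoch_3.pt", map_location="cpu")
state_dict = ckpt.get("state_dict", ckpt)
state_dict = {k.removeprefix("module."): v for k, v in state_dict.items()}
model.load_state_dict(state_dict)
model.eval()
```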
data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_19-15_28_36-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/out.log
ADDED
@@ -0,0 +1,240 @@
| 1 |
+
2024-11-19,15:28:36 | INFO | Running in distributed mode with multiple processes. Device: cuda:0.Process (global: 0, local 0), total 8.
|
| 2 |
+
2024-11-19,15:28:36 | INFO | Loading ViT-L-14-336 model config.
|
| 3 |
+
2024-11-19,15:28:39 | INFO | Loading pretrained ViT-L-14-336 weights (data/openclip-vit-14-336/openclip_model.pt).
|
| 4 |
+
2024-11-19,15:28:47 | INFO | Model:
|
| 5 |
+
2024-11-19,15:28:47 | INFO | CLIP(
|
| 6 |
+
(visual): VisualTransformer(
|
| 7 |
+
(conv1): Conv2d(3, 1024, kernel_size=(14, 14), stride=(14, 14), bias=False)
|
| 8 |
+
(ln_pre): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
|
| 9 |
+
(transformer): Transformer(
|
| 10 |
+
(resblocks): ModuleList(
|
| 11 |
+
(0-23): 24 x ResidualAttentionBlock(
|
| 12 |
+
(attn): MultiheadAttention(
|
| 13 |
+
(out_proj): NonDynamicallyQuantizableLinear(in_features=1024, out_features=1024, bias=True)
|
| 14 |
+
)
|
| 15 |
+
(ln_1): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
|
| 16 |
+
(mlp): Sequential(
|
| 17 |
+
(c_fc): Linear(in_features=1024, out_features=4096, bias=True)
|
| 18 |
+
(gelu): QuickGELU()
|
| 19 |
+
(c_proj): Linear(in_features=4096, out_features=1024, bias=True)
|
| 20 |
+
)
|
| 21 |
+
(ln_2): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
|
| 22 |
+
)
|
| 23 |
+
)
|
| 24 |
+
)
|
| 25 |
+
(ln_post): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)
|
| 26 |
+
)
|
| 27 |
+
(transformer): Transformer(
|
| 28 |
+
(resblocks): ModuleList(
|
| 29 |
+
(0-11): 12 x ResidualAttentionBlock(
|
| 30 |
+
(attn): MultiheadAttention(
|
| 31 |
+
(out_proj): NonDynamicallyQuantizableLinear(in_features=768, out_features=768, bias=True)
|
| 32 |
+
)
|
| 33 |
+
(ln_1): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
|
| 34 |
+
(mlp): Sequential(
|
| 35 |
+
(c_fc): Linear(in_features=768, out_features=3072, bias=True)
|
| 36 |
+
(gelu): QuickGELU()
|
| 37 |
+
(c_proj): Linear(in_features=3072, out_features=768, bias=True)
|
| 38 |
+
)
|
| 39 |
+
(ln_2): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
|
| 40 |
+
)
|
| 41 |
+
)
|
| 42 |
+
)
|
| 43 |
+
(token_embedding): Embedding(49408, 768)
|
| 44 |
+
(ln_final): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
|
| 45 |
+
)
|
| 46 |
+
2024-11-19,15:28:47 | INFO | Params:
|
| 47 |
+
2024-11-19,15:28:47 | INFO | batch_size: 64
|
| 48 |
+
2024-11-19,15:28:47 | INFO | beta1: 0.9
|
| 49 |
+
2024-11-19,15:28:47 | INFO | beta2: 0.98
|
| 50 |
+
2024-11-19,15:28:47 | INFO | checkpoint_path: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_19-15_28_36-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/checkpoints
|
| 51 |
+
2024-11-19,15:28:47 | INFO | copy_codebase: False
|
| 52 |
+
2024-11-19,15:28:47 | INFO | csv_caption_key: caption
|
| 53 |
+
2024-11-19,15:28:47 | INFO | csv_hard_captions_key: neg_caption
|
| 54 |
+
2024-11-19,15:28:47 | INFO | csv_img_key: img_path
|
| 55 |
+
2024-11-19,15:28:47 | INFO | csv_separator: ,
|
| 56 |
+
2024-11-19,15:28:47 | INFO | dataset_resampled: False
|
| 57 |
+
2024-11-19,15:28:47 | INFO | dataset_type: csv
|
| 58 |
+
2024-11-19,15:28:47 | INFO | ddp_static_graph: False
|
| 59 |
+
2024-11-19,15:28:47 | INFO | debug: False
|
| 60 |
+
2024-11-19,15:28:47 | INFO | device: cuda:0
|
| 61 |
+
2024-11-19,15:28:47 | INFO | dist_backend: nccl
|
| 62 |
+
2024-11-19,15:28:47 | INFO | dist_url: env://
|
| 63 |
+
2024-11-19,15:28:47 | INFO | distributed: True
|
| 64 |
+
2024-11-19,15:28:47 | INFO | epochs: 3
|
| 65 |
+
2024-11-19,15:28:47 | INFO | eps: 1e-06
|
| 66 |
+
2024-11-19,15:28:47 | INFO | force_quick_gelu: True
|
| 67 |
+
2024-11-19,15:28:47 | INFO | gather_with_grad: False
|
| 68 |
+
2024-11-19,15:28:47 | INFO | grad_checkpointing: False
|
| 69 |
+
2024-11-19,15:28:47 | INFO | horovod: False
|
| 70 |
+
2024-11-19,15:28:47 | INFO | imagenet_v2: None
|
| 71 |
+
2024-11-19,15:28:47 | INFO | imagenet_val: None
|
| 72 |
+
2024-11-19,15:28:47 | INFO | local_loss: False
|
| 73 |
+
2024-11-19,15:28:47 | INFO | local_rank: 0
|
| 74 |
+
2024-11-19,15:28:47 | INFO | lock_image: False
|
| 75 |
+
2024-11-19,15:28:47 | INFO | lock_image_freeze_bn_stats: False
|
| 76 |
+
2024-11-19,15:28:47 | INFO | lock_image_unlocked_groups: 0
|
| 77 |
+
2024-11-19,15:28:47 | INFO | log_level: 20
|
| 78 |
+
2024-11-19,15:28:47 | INFO | log_local: False
|
| 79 |
+
2024-11-19,15:28:47 | INFO | log_path: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_19-15_28_36-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/out.log
|
| 80 |
+
2024-11-19,15:28:47 | INFO | logs: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled
|
| 81 |
+
2024-11-19,15:28:47 | INFO | lr: 5e-06
|
| 82 |
+
2024-11-19,15:28:47 | INFO | model: ViT-L-14-336
|
| 83 |
+
2024-11-19,15:28:47 | INFO | name: 2024_11_19-15_28_36-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp
|
| 84 |
+
2024-11-19,15:28:47 | INFO | no_set_device_rank: False
|
| 85 |
+
2024-11-19,15:28:47 | INFO | norm_gradient_clip: None
|
| 86 |
+
2024-11-19,15:28:47 | INFO | precision: amp
|
| 87 |
+
2024-11-19,15:28:47 | INFO | pretrained: data/openclip-vit-14-336/openclip_model.pt
|
| 88 |
+
2024-11-19,15:28:47 | INFO | pretrained_image: False
|
| 89 |
+
2024-11-19,15:28:47 | INFO | rank: 0
|
| 90 |
+
2024-11-19,15:28:47 | INFO | report_to: wandb
|
| 91 |
+
2024-11-19,15:28:47 | INFO | resume: None
|
| 92 |
+
2024-11-19,15:28:47 | INFO | save_frequency: 1
|
| 93 |
+
2024-11-19,15:28:47 | INFO | save_most_recent: False
|
| 94 |
+
2024-11-19,15:28:47 | INFO | seed: 0
|
| 95 |
+
2024-11-19,15:28:47 | INFO | skip_scheduler: False
|
| 96 |
+
2024-11-19,15:28:47 | INFO | tensorboard: False
|
| 97 |
+
2024-11-19,15:28:47 | INFO | tensorboard_path:
|
| 98 |
+
2024-11-19,15:28:47 | INFO | torchscript: False
|
| 99 |
+
2024-11-19,15:28:47 | INFO | trace: False
|
| 100 |
+
2024-11-19,15:28:47 | INFO | train_data: csv_data/dvqa_qa_captions_new_sampled.csv
|
| 101 |
+
2024-11-19,15:28:47 | INFO | train_num_samples: None
|
| 102 |
+
2024-11-19,15:28:47 | INFO | use_bn_sync: False
|
| 103 |
+
2024-11-19,15:28:47 | INFO | val_data: None
|
| 104 |
+
2024-11-19,15:28:47 | INFO | val_frequency: 1
|
| 105 |
+
2024-11-19,15:28:47 | INFO | val_num_samples: None
|
| 106 |
+
2024-11-19,15:28:47 | INFO | wandb: True
|
| 107 |
+
2024-11-19,15:28:47 | INFO | wandb_notes:
|
| 108 |
+
2024-11-19,15:28:47 | INFO | wandb_project: neg-clip-dvqa_qa_captions_new_sampled
|
| 109 |
+
2024-11-19,15:28:47 | INFO | warmup: 0
|
| 110 |
+
2024-11-19,15:28:47 | INFO | wd: 0.1
|
| 111 |
+
2024-11-19,15:28:47 | INFO | workers: 4
|
| 112 |
+
2024-11-19,15:28:47 | INFO | world_size: 8
|
| 113 |
+
2024-11-19,15:28:47 | INFO | zeroshot_frequency: 2
|
| 114 |
+
2024-11-19,15:28:56 | INFO | Init a wandb project!
|
| 115 |
+
2024-11-19,15:29:01 | INFO | Start epoch 0
|
| 116 |
+
2024-11-19,15:29:05 | INFO | Train Epoch: 0 [ 512/2000000 (0%)] Loss: 5.9957 (5.996) Data (t): 1.197 Batch (t): 4.650, 110.096/s LR: 0.000005 Logit Scale: 100.000 - V4
|
| 117 |
+
2024-11-19,15:30:36 | INFO | Train Epoch: 0 [ 51712/2000000 (3%)] Loss: 2.7877 (4.392) Data (t): 0.000 Batch (t): 0.901, 569.369/s LR: 0.000005 Logit Scale: 99.996 - V4
|
| 118 |
+
2024-11-19,15:32:05 | INFO | Train Epoch: 0 [ 102912/2000000 (5%)] Loss: 2.4170 (3.733) Data (t): 0.001 Batch (t): 0.898, 570.934/s LR: 0.000005 Logit Scale: 99.995 - V4
|
| 119 |
+
2024-11-19,15:33:35 | INFO | Train Epoch: 0 [ 154112/2000000 (8%)] Loss: 2.2084 (3.352) Data (t): 0.001 Batch (t): 0.899, 571.294/s LR: 0.000005 Logit Scale: 99.994 - V4
|
| 120 |
+
2024-11-19,15:35:06 | INFO | Train Epoch: 0 [ 205312/2000000 (10%)] Loss: 2.3320 (3.148) Data (t): 0.001 Batch (t): 0.906, 569.565/s LR: 0.000005 Logit Scale: 99.993 - V4
|
| 121 |
+
2024-11-19,15:36:36 | INFO | Train Epoch: 0 [ 256512/2000000 (13%)] Loss: 2.2325 (2.996) Data (t): 0.001 Batch (t): 0.898, 570.133/s LR: 0.000005 Logit Scale: 99.991 - V4
|
| 122 |
+
2024-11-19,15:38:06 | INFO | Train Epoch: 0 [ 307712/2000000 (15%)] Loss: 1.9296 (2.843) Data (t): 0.001 Batch (t): 0.897, 572.652/s LR: 0.000005 Logit Scale: 99.987 - V4
|
| 123 |
+
2024-11-19,15:39:35 | INFO | Train Epoch: 0 [ 358912/2000000 (18%)] Loss: 2.1537 (2.757) Data (t): 0.001 Batch (t): 0.896, 571.429/s LR: 0.000005 Logit Scale: 99.984 - V4
|
| 124 |
+
2024-11-19,15:41:05 | INFO | Train Epoch: 0 [ 410112/2000000 (21%)] Loss: 2.1788 (2.693) Data (t): 0.001 Batch (t): 0.898, 571.671/s LR: 0.000005 Logit Scale: 99.984 - V4
|
| 125 |
+
2024-11-19,15:42:35 | INFO | Train Epoch: 0 [ 461312/2000000 (23%)] Loss: 2.0519 (2.629) Data (t): 0.001 Batch (t): 0.903, 572.804/s LR: 0.000005 Logit Scale: 99.982 - V4
|
| 126 |
+
2024-11-19,15:44:05 | INFO | Train Epoch: 0 [ 512512/2000000 (26%)] Loss: 1.8651 (2.559) Data (t): 0.001 Batch (t): 0.897, 572.202/s LR: 0.000005 Logit Scale: 99.980 - V4
|
| 127 |
+
2024-11-19,15:45:35 | INFO | Train Epoch: 0 [ 563712/2000000 (28%)] Loss: 2.0888 (2.520) Data (t): 0.001 Batch (t): 0.897, 571.065/s LR: 0.000005 Logit Scale: 99.977 - V4
|
| 128 |
+
2024-11-19,15:47:05 | INFO | Train Epoch: 0 [ 614912/2000000 (31%)] Loss: 1.9534 (2.477) Data (t): 0.001 Batch (t): 0.898, 570.971/s LR: 0.000005 Logit Scale: 99.975 - V4
|
| 129 |
+
2024-11-19,15:48:34 | INFO | Train Epoch: 0 [ 666112/2000000 (33%)] Loss: 1.7041 (2.421) Data (t): 0.001 Batch (t): 0.897, 571.105/s LR: 0.000005 Logit Scale: 99.975 - V4
|
| 130 |
+
2024-11-19,15:50:05 | INFO | Train Epoch: 0 [ 717312/2000000 (36%)] Loss: 1.8796 (2.385) Data (t): 0.001 Batch (t): 0.908, 570.693/s LR: 0.000005 Logit Scale: 99.971 - V4
|
| 131 |
+
2024-11-19,15:51:35 | INFO | Train Epoch: 0 [ 768512/2000000 (38%)] Loss: 1.8147 (2.350) Data (t): 0.001 Batch (t): 0.896, 572.011/s LR: 0.000005 Logit Scale: 99.971 - V4
|
| 132 |
+
2024-11-19,15:53:04 | INFO | Train Epoch: 0 [ 819712/2000000 (41%)] Loss: 2.0721 (2.333) Data (t): 0.001 Batch (t): 0.896, 573.465/s LR: 0.000005 Logit Scale: 99.968 - V4
|
| 133 |
+
2024-11-19,15:54:34 | INFO | Train Epoch: 0 [ 870912/2000000 (44%)] Loss: 1.9114 (2.310) Data (t): 0.001 Batch (t): 0.897, 571.653/s LR: 0.000005 Logit Scale: 99.966 - V4
2024-11-19,15:56:04 | INFO | Train Epoch: 0 [ 922112/2000000 (46%)] Loss: 1.9547 (2.291) Data (t): 0.001 Batch (t): 0.897, 570.605/s LR: 0.000005 Logit Scale: 99.965 - V4
2024-11-19,15:57:35 | INFO | Train Epoch: 0 [ 973312/2000000 (49%)] Loss: 1.8327 (2.268) Data (t): 0.001 Batch (t): 0.909, 569.498/s LR: 0.000005 Logit Scale: 99.964 - V4
2024-11-19,15:59:04 | INFO | Train Epoch: 0 [1024512/2000000 (51%)] Loss: 1.9088 (2.251) Data (t): 0.001 Batch (t): 0.898, 567.527/s LR: 0.000005 Logit Scale: 99.964 - V4
2024-11-19,16:00:34 | INFO | Train Epoch: 0 [1075712/2000000 (54%)] Loss: 1.9298 (2.236) Data (t): 0.001 Batch (t): 0.899, 570.316/s LR: 0.000005 Logit Scale: 99.963 - V4
2024-11-19,16:02:04 | INFO | Train Epoch: 0 [1126912/2000000 (56%)] Loss: 1.7629 (2.216) Data (t): 0.001 Batch (t): 0.898, 570.652/s LR: 0.000005 Logit Scale: 99.963 - V4
2024-11-19,16:03:34 | INFO | Train Epoch: 0 [1178112/2000000 (59%)] Loss: 1.8551 (2.201) Data (t): 0.001 Batch (t): 0.898, 570.355/s LR: 0.000005 Logit Scale: 99.965 - V4
2024-11-19,16:05:05 | INFO | Train Epoch: 0 [1229312/2000000 (61%)] Loss: 1.7685 (2.184) Data (t): 0.001 Batch (t): 0.909, 570.915/s LR: 0.000005 Logit Scale: 99.964 - V4
2024-11-19,16:06:34 | INFO | Train Epoch: 0 [1280512/2000000 (64%)] Loss: 1.8566 (2.171) Data (t): 0.001 Batch (t): 0.896, 572.136/s LR: 0.000004 Logit Scale: 99.965 - V4
2024-11-19,16:08:04 | INFO | Train Epoch: 0 [1331712/2000000 (67%)] Loss: 1.9796 (2.164) Data (t): 0.001 Batch (t): 0.897, 568.955/s LR: 0.000004 Logit Scale: 99.964 - V4
2024-11-19,16:09:34 | INFO | Train Epoch: 0 [1382912/2000000 (69%)] Loss: 1.8832 (2.154) Data (t): 0.001 Batch (t): 0.898, 570.155/s LR: 0.000004 Logit Scale: 99.965 - V4
2024-11-19,16:11:04 | INFO | Train Epoch: 0 [1434112/2000000 (72%)] Loss: 1.8433 (2.143) Data (t): 0.001 Batch (t): 0.897, 570.615/s LR: 0.000004 Logit Scale: 99.963 - V4
2024-11-19,16:12:33 | INFO | Train Epoch: 0 [1485312/2000000 (74%)] Loss: 1.8844 (2.135) Data (t): 0.001 Batch (t): 0.899, 567.456/s LR: 0.000004 Logit Scale: 99.964 - V4
2024-11-19,16:14:04 | INFO | Train Epoch: 0 [1536512/2000000 (77%)] Loss: 1.9147 (2.127) Data (t): 0.001 Batch (t): 0.907, 569.984/s LR: 0.000004 Logit Scale: 99.966 - V4
2024-11-19,16:15:34 | INFO | Train Epoch: 0 [1587712/2000000 (79%)] Loss: 1.7464 (2.116) Data (t): 0.001 Batch (t): 0.897, 570.759/s LR: 0.000004 Logit Scale: 99.967 - V4
2024-11-19,16:17:04 | INFO | Train Epoch: 0 [1638912/2000000 (82%)] Loss: 1.8658 (2.108) Data (t): 0.001 Batch (t): 0.898, 569.301/s LR: 0.000004 Logit Scale: 99.968 - V4
2024-11-19,16:18:33 | INFO | Train Epoch: 0 [1690112/2000000 (85%)] Loss: 1.8141 (2.099) Data (t): 0.001 Batch (t): 0.897, 573.014/s LR: 0.000004 Logit Scale: 99.968 - V4
2024-11-19,16:20:03 | INFO | Train Epoch: 0 [1741312/2000000 (87%)] Loss: 1.7476 (2.089) Data (t): 0.001 Batch (t): 0.900, 568.969/s LR: 0.000004 Logit Scale: 99.969 - V4
2024-11-19,16:21:34 | INFO | Train Epoch: 0 [1792512/2000000 (90%)] Loss: 1.7485 (2.080) Data (t): 0.001 Batch (t): 0.909, 569.211/s LR: 0.000004 Logit Scale: 99.969 - V4
2024-11-19,16:23:04 | INFO | Train Epoch: 0 [1843712/2000000 (92%)] Loss: 1.8207 (2.073) Data (t): 0.001 Batch (t): 0.898, 568.663/s LR: 0.000004 Logit Scale: 99.971 - V4
2024-11-19,16:24:34 | INFO | Train Epoch: 0 [1894912/2000000 (95%)] Loss: 1.7328 (2.064) Data (t): 0.001 Batch (t): 0.899, 572.522/s LR: 0.000004 Logit Scale: 99.973 - V4
2024-11-19,16:26:04 | INFO | Train Epoch: 0 [1946112/2000000 (97%)] Loss: 1.5711 (2.051) Data (t): 0.001 Batch (t): 0.899, 570.532/s LR: 0.000004 Logit Scale: 99.975 - V4
2024-11-19,16:27:34 | INFO | Train Epoch: 0 [1997312/2000000 (100%)] Loss: 1.7178 (2.043) Data (t): 0.001 Batch (t): 0.898, 571.033/s LR: 0.000004 Logit Scale: 99.977 - V4
2024-11-19,16:27:38 | INFO | Train Epoch: 0 [1999872/2000000 (100%)] Loss: 1.6936 (2.034) Data (t): 0.005 Batch (t): 0.895, 571.122/s LR: 0.000004 Logit Scale: 99.977 - V4
2024-11-19,16:27:43 | INFO | Start epoch 1
2024-11-19,16:27:45 | INFO | Train Epoch: 1 [ 512/2000000 (0%)] Loss: 1.8669 (1.867) Data (t): 1.031 Batch (t): 1.915, 267.326/s LR: 0.000004 Logit Scale: 99.977 - V4
2024-11-19,16:29:16 | INFO | Train Epoch: 1 [ 51712/2000000 (3%)] Loss: 1.6626 (1.765) Data (t): 0.001 Batch (t): 0.907, 570.171/s LR: 0.000004 Logit Scale: 99.980 - V4
2024-11-19,16:30:46 | INFO | Train Epoch: 1 [ 102912/2000000 (5%)] Loss: 1.5746 (1.701) Data (t): 0.001 Batch (t): 0.898, 569.641/s LR: 0.000004 Logit Scale: 99.982 - V4
2024-11-19,16:32:15 | INFO | Train Epoch: 1 [ 154112/2000000 (8%)] Loss: 1.8032 (1.727) Data (t): 0.001 Batch (t): 0.898, 568.967/s LR: 0.000004 Logit Scale: 99.984 - V4
2024-11-19,16:33:45 | INFO | Train Epoch: 1 [ 205312/2000000 (10%)] Loss: 1.7087 (1.723) Data (t): 0.001 Batch (t): 0.898, 571.700/s LR: 0.000004 Logit Scale: 99.988 - V4
2024-11-19,16:35:15 | INFO | Train Epoch: 1 [ 256512/2000000 (13%)] Loss: 1.6652 (1.714) Data (t): 0.001 Batch (t): 0.901, 569.765/s LR: 0.000003 Logit Scale: 99.991 - V4
2024-11-19,16:36:46 | INFO | Train Epoch: 1 [ 307712/2000000 (15%)] Loss: 1.6190 (1.700) Data (t): 0.001 Batch (t): 0.905, 569.930/s LR: 0.000003 Logit Scale: 99.994 - V4
2024-11-19,16:38:16 | INFO | Train Epoch: 1 [ 358912/2000000 (18%)] Loss: 1.8352 (1.717) Data (t): 0.001 Batch (t): 0.898, 568.760/s LR: 0.000003 Logit Scale: 99.996 - V4
2024-11-19,16:39:45 | INFO | Train Epoch: 1 [ 410112/2000000 (21%)] Loss: 1.6636 (1.711) Data (t): 0.001 Batch (t): 0.898, 573.033/s LR: 0.000003 Logit Scale: 100.000 - V4
2024-11-19,16:41:15 | INFO | Train Epoch: 1 [ 461312/2000000 (23%)] Loss: 1.6142 (1.701) Data (t): 0.001 Batch (t): 0.897, 571.555/s LR: 0.000003 Logit Scale: 100.000 - V4
2024-11-19,16:42:45 | INFO | Train Epoch: 1 [ 512512/2000000 (26%)] Loss: 1.6758 (1.699) Data (t): 0.001 Batch (t): 0.898, 569.545/s LR: 0.000003 Logit Scale: 99.999 - V4
2024-11-19,16:44:15 | INFO | Train Epoch: 1 [ 563712/2000000 (28%)] Loss: 1.7603 (1.704) Data (t): 0.001 Batch (t): 0.904, 568.421/s LR: 0.000003 Logit Scale: 100.000 - V4
2024-11-19,16:45:45 | INFO | Train Epoch: 1 [ 614912/2000000 (31%)] Loss: 1.6037 (1.696) Data (t): 0.001 Batch (t): 0.898, 570.934/s LR: 0.000003 Logit Scale: 100.000 - V4
2024-11-19,16:47:15 | INFO | Train Epoch: 1 [ 666112/2000000 (33%)] Loss: 1.7349 (1.699) Data (t): 0.001 Batch (t): 0.897, 569.741/s LR: 0.000003 Logit Scale: 100.000 - V4
2024-11-19,16:48:44 | INFO | Train Epoch: 1 [ 717312/2000000 (36%)] Loss: 1.7195 (1.700) Data (t): 0.001 Batch (t): 0.897, 567.906/s LR: 0.000003 Logit Scale: 99.999 - V4
2024-11-19,16:50:14 | INFO | Train Epoch: 1 [ 768512/2000000 (38%)] Loss: 1.7906 (1.706) Data (t): 0.001 Batch (t): 0.898, 570.482/s LR: 0.000003 Logit Scale: 100.000 - V4
2024-11-19,16:51:44 | INFO | Train Epoch: 1 [ 819712/2000000 (41%)] Loss: 1.6468 (1.703) Data (t): 0.001 Batch (t): 0.898, 570.196/s LR: 0.000003 Logit Scale: 100.000 - V4
2024-11-19,16:53:14 | INFO | Train Epoch: 1 [ 870912/2000000 (44%)] Loss: 1.8331 (1.710) Data (t): 0.001 Batch (t): 0.903, 571.648/s LR: 0.000003 Logit Scale: 99.999 - V4
2024-11-19,16:54:44 | INFO | Train Epoch: 1 [ 922112/2000000 (46%)] Loss: 1.6685 (1.708) Data (t): 0.001 Batch (t): 0.896, 571.512/s LR: 0.000003 Logit Scale: 100.000 - V4
2024-11-19,16:56:14 | INFO | Train Epoch: 1 [ 973312/2000000 (49%)] Loss: 1.6768 (1.706) Data (t): 0.001 Batch (t): 0.896, 573.670/s LR: 0.000003 Logit Scale: 100.000 - V4
2024-11-19,16:57:43 | INFO | Train Epoch: 1 [1024512/2000000 (51%)] Loss: 1.7772 (1.710) Data (t): 0.001 Batch (t): 0.897, 570.219/s LR: 0.000002 Logit Scale: 100.000 - V4
2024-11-19,16:59:13 | INFO | Train Epoch: 1 [1075712/2000000 (54%)] Loss: 1.4077 (1.696) Data (t): 0.001 Batch (t): 0.899, 570.241/s LR: 0.000002 Logit Scale: 100.000 - V4
2024-11-19,17:00:44 | INFO | Train Epoch: 1 [1126912/2000000 (56%)] Loss: 1.8494 (1.702) Data (t): 0.001 Batch (t): 0.905, 568.207/s LR: 0.000002 Logit Scale: 100.000 - V4
2024-11-19,17:02:14 | INFO | Train Epoch: 1 [1178112/2000000 (59%)] Loss: 1.8079 (1.707) Data (t): 0.001 Batch (t): 0.899, 569.389/s LR: 0.000002 Logit Scale: 100.000 - V4
2024-11-19,17:03:43 | INFO | Train Epoch: 1 [1229312/2000000 (61%)] Loss: 1.5961 (1.702) Data (t): 0.001 Batch (t): 0.899, 568.032/s LR: 0.000002 Logit Scale: 100.000 - V4
2024-11-19,17:05:13 | INFO | Train Epoch: 1 [1280512/2000000 (64%)] Loss: 1.5522 (1.697) Data (t): 0.001 Batch (t): 0.897, 570.287/s LR: 0.000002 Logit Scale: 100.000 - V4
2024-11-19,17:06:43 | INFO | Train Epoch: 1 [1331712/2000000 (67%)] Loss: 1.5550 (1.691) Data (t): 0.001 Batch (t): 0.899, 571.472/s LR: 0.000002 Logit Scale: 100.000 - V4
2024-11-19,17:08:14 | INFO | Train Epoch: 1 [1382912/2000000 (69%)] Loss: 1.7001 (1.692) Data (t): 0.001 Batch (t): 0.905, 570.941/s LR: 0.000002 Logit Scale: 100.000 - V4
2024-11-19,17:09:43 | INFO | Train Epoch: 1 [1434112/2000000 (72%)] Loss: 1.7038 (1.692) Data (t): 0.001 Batch (t): 0.898, 573.978/s LR: 0.000002 Logit Scale: 100.000 - V4
2024-11-19,17:11:13 | INFO | Train Epoch: 1 [1485312/2000000 (74%)] Loss: 1.6293 (1.690) Data (t): 0.001 Batch (t): 0.896, 573.044/s LR: 0.000002 Logit Scale: 100.000 - V4
2024-11-19,17:12:43 | INFO | Train Epoch: 1 [1536512/2000000 (77%)] Loss: 1.5268 (1.685) Data (t): 0.001 Batch (t): 0.897, 572.830/s LR: 0.000002 Logit Scale: 100.000 - V4
2024-11-19,17:14:12 | INFO | Train Epoch: 1 [1587712/2000000 (79%)] Loss: 1.5464 (1.680) Data (t): 0.001 Batch (t): 0.896, 568.953/s LR: 0.000002 Logit Scale: 100.000 - V4
2024-11-19,17:15:43 | INFO | Train Epoch: 1 [1638912/2000000 (82%)] Loss: 1.5259 (1.676) Data (t): 0.001 Batch (t): 0.907, 571.760/s LR: 0.000002 Logit Scale: 100.000 - V4
2024-11-19,17:17:13 | INFO | Train Epoch: 1 [1690112/2000000 (85%)] Loss: 1.4916 (1.670) Data (t): 0.001 Batch (t): 0.898, 571.271/s LR: 0.000002 Logit Scale: 99.999 - V4
2024-11-19,17:18:42 | INFO | Train Epoch: 1 [1741312/2000000 (87%)] Loss: 1.8018 (1.674) Data (t): 0.001 Batch (t): 0.897, 569.211/s LR: 0.000002 Logit Scale: 100.000 - V4
2024-11-19,17:20:12 | INFO | Train Epoch: 1 [1792512/2000000 (90%)] Loss: 1.7321 (1.676) Data (t): 0.001 Batch (t): 0.898, 572.494/s LR: 0.000001 Logit Scale: 100.000 - V4
2024-11-19,17:21:42 | INFO | Train Epoch: 1 [1843712/2000000 (92%)] Loss: 1.7267 (1.677) Data (t): 0.001 Batch (t): 0.898, 571.106/s LR: 0.000001 Logit Scale: 100.000 - V4
2024-11-19,17:23:13 | INFO | Train Epoch: 1 [1894912/2000000 (95%)] Loss: 1.5456 (1.674) Data (t): 0.001 Batch (t): 0.907, 569.895/s LR: 0.000001 Logit Scale: 100.000 - V4
2024-11-19,17:24:42 | INFO | Train Epoch: 1 [1946112/2000000 (97%)] Loss: 1.6294 (1.673) Data (t): 0.001 Batch (t): 0.897, 571.159/s LR: 0.000001 Logit Scale: 100.000 - V4
2024-11-19,17:26:12 | INFO | Train Epoch: 1 [1997312/2000000 (100%)] Loss: 1.6536 (1.672) Data (t): 0.001 Batch (t): 0.897, 570.040/s LR: 0.000001 Logit Scale: 100.000 - V4
2024-11-19,17:26:17 | INFO | Train Epoch: 1 [1999872/2000000 (100%)] Loss: 1.5622 (1.669) Data (t): 0.005 Batch (t): 0.895, 569.903/s LR: 0.000001 Logit Scale: 100.000 - V4
2024-11-19,17:26:21 | INFO | Start epoch 2
2024-11-19,17:26:23 | INFO | Train Epoch: 2 [ 512/2000000 (0%)] Loss: 1.7242 (1.724) Data (t): 0.981 Batch (t): 1.872, 273.458/s LR: 0.000001 Logit Scale: 100.000 - V4
2024-11-19,17:27:53 | INFO | Train Epoch: 2 [ 51712/2000000 (3%)] Loss: 1.7108 (1.717) Data (t): 0.001 Batch (t): 0.899, 571.474/s LR: 0.000001 Logit Scale: 100.000 - V4
2024-11-19,17:29:23 | INFO | Train Epoch: 2 [ 102912/2000000 (5%)] Loss: 1.5005 (1.645) Data (t): 0.001 Batch (t): 0.900, 572.427/s LR: 0.000001 Logit Scale: 100.000 - V4
2024-11-19,17:30:53 | INFO | Train Epoch: 2 [ 154112/2000000 (8%)] Loss: 1.5384 (1.618) Data (t): 0.001 Batch (t): 0.902, 569.623/s LR: 0.000001 Logit Scale: 100.000 - V4
2024-11-19,17:32:23 | INFO | Train Epoch: 2 [ 205312/2000000 (10%)] Loss: 1.5692 (1.609) Data (t): 0.001 Batch (t): 0.900, 573.816/s LR: 0.000001 Logit Scale: 100.000 - V4
2024-11-19,17:33:53 | INFO | Train Epoch: 2 [ 256512/2000000 (13%)] Loss: 1.6483 (1.615) Data (t): 0.001 Batch (t): 0.897, 572.838/s LR: 0.000001 Logit Scale: 100.000 - V4
2024-11-19,17:35:23 | INFO | Train Epoch: 2 [ 307712/2000000 (15%)] Loss: 1.7056 (1.628) Data (t): 0.001 Batch (t): 0.895, 570.674/s LR: 0.000001 Logit Scale: 100.000 - V4
2024-11-19,17:36:52 | INFO | Train Epoch: 2 [ 358912/2000000 (18%)] Loss: 1.5977 (1.624) Data (t): 0.001 Batch (t): 0.896, 570.512/s LR: 0.000001 Logit Scale: 100.000 - V4
2024-11-19,17:38:22 | INFO | Train Epoch: 2 [ 410112/2000000 (21%)] Loss: 1.5250 (1.613) Data (t): 0.001 Batch (t): 0.898, 572.520/s LR: 0.000001 Logit Scale: 100.000 - V4
2024-11-19,17:39:52 | INFO | Train Epoch: 2 [ 461312/2000000 (23%)] Loss: 1.5137 (1.603) Data (t): 0.001 Batch (t): 0.903, 571.206/s LR: 0.000001 Logit Scale: 100.000 - V4
2024-11-19,17:41:22 | INFO | Train Epoch: 2 [ 512512/2000000 (26%)] Loss: 1.6001 (1.603) Data (t): 0.001 Batch (t): 0.897, 571.950/s LR: 0.000001 Logit Scale: 100.000 - V4
2024-11-19,17:42:52 | INFO | Train Epoch: 2 [ 563712/2000000 (28%)] Loss: 1.6134 (1.604) Data (t): 0.001 Batch (t): 0.896, 570.163/s LR: 0.000001 Logit Scale: 100.000 - V4
2024-11-19,17:44:21 | INFO | Train Epoch: 2 [ 614912/2000000 (31%)] Loss: 1.7793 (1.617) Data (t): 0.001 Batch (t): 0.896, 569.077/s LR: 0.000001 Logit Scale: 100.000 - V4
2024-11-19,17:45:51 | INFO | Train Epoch: 2 [ 666112/2000000 (33%)] Loss: 1.6295 (1.618) Data (t): 0.001 Batch (t): 0.899, 571.097/s LR: 0.000001 Logit Scale: 100.000 - V4
2024-11-19,17:47:21 | INFO | Train Epoch: 2 [ 717312/2000000 (36%)] Loss: 1.6220 (1.619) Data (t): 0.001 Batch (t): 0.904, 570.421/s LR: 0.000001 Logit Scale: 100.000 - V4
2024-11-19,17:48:51 | INFO | Train Epoch: 2 [ 768512/2000000 (38%)] Loss: 1.6141 (1.618) Data (t): 0.001 Batch (t): 0.899, 568.045/s LR: 0.000001 Logit Scale: 100.000 - V4
2024-11-19,17:50:21 | INFO | Train Epoch: 2 [ 819712/2000000 (41%)] Loss: 1.5424 (1.614) Data (t): 0.001 Batch (t): 0.900, 568.090/s LR: 0.000000 Logit Scale: 100.000 - V4
2024-11-19,17:51:51 | INFO | Train Epoch: 2 [ 870912/2000000 (44%)] Loss: 1.6344 (1.615) Data (t): 0.001 Batch (t): 0.899, 570.721/s LR: 0.000000 Logit Scale: 100.000 - V4
2024-11-19,17:53:21 | INFO | Train Epoch: 2 [ 922112/2000000 (46%)] Loss: 1.5585 (1.612) Data (t): 0.001 Batch (t): 0.901, 570.929/s LR: 0.000000 Logit Scale: 100.000 - V4
2024-11-19,17:54:52 | INFO | Train Epoch: 2 [ 973312/2000000 (49%)] Loss: 1.6504 (1.614) Data (t): 0.001 Batch (t): 0.905, 569.907/s LR: 0.000000 Logit Scale: 100.000 - V4
2024-11-19,17:56:22 | INFO | Train Epoch: 2 [1024512/2000000 (51%)] Loss: 1.6103 (1.614) Data (t): 0.001 Batch (t): 0.899, 568.902/s LR: 0.000000 Logit Scale: 100.000 - V4
2024-11-19,17:57:52 | INFO | Train Epoch: 2 [1075712/2000000 (54%)] Loss: 1.6474 (1.615) Data (t): 0.001 Batch (t): 0.900, 570.905/s LR: 0.000000 Logit Scale: 100.000 - V4
2024-11-19,17:59:22 | INFO | Train Epoch: 2 [1126912/2000000 (56%)] Loss: 1.4019 (1.606) Data (t): 0.001 Batch (t): 0.899, 569.141/s LR: 0.000000 Logit Scale: 100.000 - V4
2024-11-19,18:00:52 | INFO | Train Epoch: 2 [1178112/2000000 (59%)] Loss: 1.6659 (1.608) Data (t): 0.001 Batch (t): 0.901, 570.899/s LR: 0.000000 Logit Scale: 100.000 - V4
2024-11-19,18:02:22 | INFO | Train Epoch: 2 [1229312/2000000 (61%)] Loss: 1.6503 (1.610) Data (t): 0.001 Batch (t): 0.905, 569.803/s LR: 0.000000 Logit Scale: 100.000 - V4
2024-11-19,18:03:52 | INFO | Train Epoch: 2 [1280512/2000000 (64%)] Loss: 1.5176 (1.607) Data (t): 0.001 Batch (t): 0.899, 572.951/s LR: 0.000000 Logit Scale: 100.000 - V4
2024-11-19,18:05:22 | INFO | Train Epoch: 2 [1331712/2000000 (67%)] Loss: 1.7743 (1.613) Data (t): 0.001 Batch (t): 0.899, 571.605/s LR: 0.000000 Logit Scale: 100.000 - V4
2024-11-19,18:06:52 | INFO | Train Epoch: 2 [1382912/2000000 (69%)] Loss: 1.5842 (1.612) Data (t): 0.001 Batch (t): 0.898, 570.921/s LR: 0.000000 Logit Scale: 100.000 - V4
2024-11-19,18:08:22 | INFO | Train Epoch: 2 [1434112/2000000 (72%)] Loss: 1.7001 (1.615) Data (t): 0.001 Batch (t): 0.898, 568.411/s LR: 0.000000 Logit Scale: 100.000 - V4
2024-11-19,18:09:52 | INFO | Train Epoch: 2 [1485312/2000000 (74%)] Loss: 1.6202 (1.615) Data (t): 0.001 Batch (t): 0.905, 568.251/s LR: 0.000000 Logit Scale: 100.000 - V4
2024-11-19,18:11:22 | INFO | Train Epoch: 2 [1536512/2000000 (77%)] Loss: 1.5798 (1.614) Data (t): 0.001 Batch (t): 0.900, 572.671/s LR: 0.000000 Logit Scale: 100.000 - V4
2024-11-19,18:12:52 | INFO | Train Epoch: 2 [1587712/2000000 (79%)] Loss: 1.5903 (1.613) Data (t): 0.001 Batch (t): 0.897, 570.863/s LR: 0.000000 Logit Scale: 100.000 - V4
2024-11-19,18:14:22 | INFO | Train Epoch: 2 [1638912/2000000 (82%)] Loss: 1.4327 (1.608) Data (t): 0.001 Batch (t): 0.897, 571.413/s LR: 0.000000 Logit Scale: 100.000 - V4
2024-11-19,18:15:51 | INFO | Train Epoch: 2 [1690112/2000000 (85%)] Loss: 1.5730 (1.607) Data (t): 0.001 Batch (t): 0.896, 569.513/s LR: 0.000000 Logit Scale: 100.000 - V4
2024-11-19,18:17:21 | INFO | Train Epoch: 2 [1741312/2000000 (87%)] Loss: 1.6404 (1.608) Data (t): 0.001 Batch (t): 0.900, 567.369/s LR: 0.000000 Logit Scale: 100.000 - V4
2024-11-19,18:18:52 | INFO | Train Epoch: 2 [1792512/2000000 (90%)] Loss: 1.6068 (1.608) Data (t): 0.001 Batch (t): 0.905, 571.641/s LR: 0.000000 Logit Scale: 100.000 - V4
2024-11-19,18:20:22 | INFO | Train Epoch: 2 [1843712/2000000 (92%)] Loss: 1.7877 (1.612) Data (t): 0.001 Batch (t): 0.898, 570.876/s LR: 0.000000 Logit Scale: 100.000 - V4
2024-11-19,18:21:51 | INFO | Train Epoch: 2 [1894912/2000000 (95%)] Loss: 1.5601 (1.611) Data (t): 0.001 Batch (t): 0.898, 567.361/s LR: 0.000000 Logit Scale: 100.000 - V4
2024-11-19,18:23:21 | INFO | Train Epoch: 2 [1946112/2000000 (97%)] Loss: 1.5902 (1.611) Data (t): 0.001 Batch (t): 0.898, 572.244/s LR: 0.000000 Logit Scale: 100.000 - V4
2024-11-19,18:24:51 | INFO | Train Epoch: 2 [1997312/2000000 (100%)] Loss: 1.6741 (1.612) Data (t): 0.001 Batch (t): 0.900, 569.954/s LR: 0.000000 Logit Scale: 100.000 - V4
2024-11-19,18:24:56 | INFO | Train Epoch: 2 [1999872/2000000 (100%)] Loss: 1.6024 (1.612) Data (t): 0.005 Batch (t): 0.896, 569.465/s LR: 0.000000 Logit Scale: 100.000 - V4
data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_19-15_28_36-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/params.txt
ADDED
@@ -0,0 +1,67 @@
batch_size: 64
beta1: 0.9
beta2: 0.98
checkpoint_path: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_19-15_28_36-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/checkpoints
copy_codebase: False
csv_caption_key: caption
csv_hard_captions_key: neg_caption
csv_img_key: img_path
csv_separator: ,
dataset_resampled: False
dataset_type: csv
ddp_static_graph: False
debug: False
device: cuda:0
dist_backend: nccl
dist_url: env://
distributed: True
epochs: 3
eps: 1e-06
force_quick_gelu: True
gather_with_grad: False
grad_checkpointing: False
horovod: False
imagenet_v2: None
imagenet_val: None
local_loss: False
local_rank: 0
lock_image: False
lock_image_freeze_bn_stats: False
lock_image_unlocked_groups: 0
log_level: 20
log_local: False
log_path: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled/2024_11_19-15_28_36-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp/out.log
logs: data/trained_openclip/negative_logs/dvqa_qa_captions_new_sampled
lr: 5e-06
model: ViT-L-14-336
name: 2024_11_19-15_28_36-model_ViT-L-14-336-lr_5e-06-b_64-j_4-p_amp
no_set_device_rank: False
norm_gradient_clip: None
precision: amp
pretrained: data/openclip-vit-14-336/openclip_model.pt
pretrained_image: False
rank: 0
report_to: wandb
resume: None
save_frequency: 1
save_most_recent: False
seed: 0
skip_scheduler: False
tensorboard: False
tensorboard_path:
torchscript: False
trace: False
train_data: csv_data/dvqa_qa_captions_new_sampled.csv
train_num_samples: None
use_bn_sync: False
val_data: None
val_frequency: 1
val_num_samples: None
wandb: True
wandb_notes:
wandb_project: neg-clip-dvqa_qa_captions_new_sampled
warmup: 0
wd: 0.1
workers: 4
world_size: 8
zeroshot_frequency: 2