bolinlai committed
Commit 44c0a08 · 1 Parent(s): b46d9d3

first version

Files changed (23)
  1. ego4d_diffusion_with_vllm_feature.ckpt +3 -0
  2. llava-llama-2-13b-chat-forecasting-finetune/config.json +39 -0
  3. llava-llama-2-13b-chat-forecasting-finetune/generation_config.json +7 -0
  4. llava-llama-2-13b-chat-forecasting-finetune/latest +1 -0
  5. llava-llama-2-13b-chat-forecasting-finetune/pytorch_model-00001-of-00003.bin +3 -0
  6. llava-llama-2-13b-chat-forecasting-finetune/pytorch_model-00002-of-00003.bin +3 -0
  7. llava-llama-2-13b-chat-forecasting-finetune/pytorch_model-00003-of-00003.bin +3 -0
  8. llava-llama-2-13b-chat-forecasting-finetune/pytorch_model.bin.index.json +803 -0
  9. llava-llama-2-13b-chat-forecasting-finetune/rng_state_0.pth +3 -0
  10. llava-llama-2-13b-chat-forecasting-finetune/rng_state_1.pth +3 -0
  11. llava-llama-2-13b-chat-forecasting-finetune/rng_state_2.pth +3 -0
  12. llava-llama-2-13b-chat-forecasting-finetune/rng_state_3.pth +3 -0
  13. llava-llama-2-13b-chat-forecasting-finetune/rng_state_4.pth +3 -0
  14. llava-llama-2-13b-chat-forecasting-finetune/rng_state_5.pth +3 -0
  15. llava-llama-2-13b-chat-forecasting-finetune/rng_state_6.pth +3 -0
  16. llava-llama-2-13b-chat-forecasting-finetune/rng_state_7.pth +3 -0
  17. llava-llama-2-13b-chat-forecasting-finetune/special_tokens_map.json +24 -0
  18. llava-llama-2-13b-chat-forecasting-finetune/tokenizer.model +3 -0
  19. llava-llama-2-13b-chat-forecasting-finetune/tokenizer_config.json +35 -0
  20. llava-llama-2-13b-chat-forecasting-finetune/trainer_state.json +2716 -0
  21. llava-llama-2-13b-chat-forecasting-finetune/training_args.bin +3 -0
  22. llava-llama-2-13b-chat-forecasting-finetune/zero_to_fp32.py +578 -0
  23. scaleup_training_ego4d_eval.ckpt +3 -0
ego4d_diffusion_with_vllm_feature.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b84b56ffbb5363bd720e590398ded8b5cbb5ff85b9432bca569b01a86f300978
+ size 7779511314
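
The three lines above are a Git LFS pointer, not the checkpoint itself: only the version/oid/size stub is stored in the Git history, while the ~7.8 GB file lives in LFS storage. A minimal sketch of pulling the real file through huggingface_hub (the repo id is a placeholder, since this commit view does not name the repository):

from huggingface_hub import hf_hub_download

# Placeholder repo id -- substitute the actual Hub repository this commit belongs to.
ckpt_path = hf_hub_download(
    repo_id="bolinlai/<repo-name>",
    filename="ego4d_diffusion_with_vllm_feature.ckpt",
)
print(ckpt_path)  # local path to the resolved ~7.8 GB checkpoint, not the LFS stub
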
llava-llama-2-13b-chat-forecasting-finetune/config.json ADDED
@@ -0,0 +1,39 @@
+ {
+ "_name_or_path": "/checkpoints/bolinlai/llava/released/models--liuhaotian--llava-llama-2-13b-chat-lightning-preview/snapshots/bcda0227a7f371117a195ef0af88c1616a830520",
+ "architectures": [
+ "LlavaLlamaForCausalLM"
+ ],
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "freeze_mm_mlp_adapter": false,
+ "hidden_act": "silu",
+ "hidden_size": 5120,
+ "image_aspect_ratio": "square",
+ "image_grid_pinpoints": null,
+ "initializer_range": 0.02,
+ "intermediate_size": 13824,
+ "max_position_embeddings": 4096,
+ "mm_hidden_size": 1024,
+ "mm_resampler_type": null,
+ "mm_use_im_patch_token": false,
+ "mm_use_im_start_end": false,
+ "mm_vision_select_feature": "patch",
+ "mm_vision_select_layer": -2,
+ "mm_vision_tower": "openai/clip-vit-large-patch14",
+ "model_type": "llava",
+ "num_attention_heads": 40,
+ "num_hidden_layers": 40,
+ "num_key_value_heads": 40,
+ "pad_token_id": 0,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-05,
+ "rope_scaling": null,
+ "tie_word_embeddings": false,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.31.0",
+ "tune_mm_mlp_adapter": false,
+ "tune_mm_vision_resampler": false,
+ "use_cache": false,
+ "use_mm_proj": true,
+ "vocab_size": 32000
+ }
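
As a quick sanity check, the language-model fields above (hidden_size 5120, intermediate_size 13824, 40 layers, 32000-token vocab) reproduce the ~13B parameter count in the model name. A back-of-the-envelope sketch that ignores the CLIP vision tower and the mm_projector this checkpoint also carries:

# Rough parameter count of the Llama-2-13B backbone from the config.json above.
hidden, inter, layers, vocab = 5120, 13824, 40, 32000

embeddings = vocab * hidden            # input token embeddings
lm_head = vocab * hidden               # separate output head (tie_word_embeddings is false)
attn = 4 * hidden * hidden             # q/k/v/o projections per layer (no GQA: 40 kv heads)
mlp = 3 * hidden * inter               # gate/up/down projections per layer
norms = 2 * hidden                     # the two RMSNorms per layer

total = embeddings + lm_head + layers * (attn + mlp + norms) + hidden  # + final norm
print(f"~{total / 1e9:.1f}B parameters")  # prints ~13.0B
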
llava-llama-2-13b-chat-forecasting-finetune/generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 1,
+ "eos_token_id": 2,
+ "pad_token_id": 0,
+ "transformers_version": "4.31.0"
+ }
llava-llama-2-13b-chat-forecasting-finetune/latest ADDED
@@ -0,0 +1 @@
+ global_step450
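
The latest file is the tag DeepSpeed writes next to its checkpoints (the rng_state_*.pth files and the bundled zero_to_fp32.py in this commit point to the same training setup); it names the step directory a resume or FP32 consolidation would look for. A small sketch of reading it, assuming the folder layout shown in this commit:

# Resolve the DeepSpeed checkpoint tag recorded by this run.
ckpt_dir = "llava-llama-2-13b-chat-forecasting-finetune"
with open(f"{ckpt_dir}/latest") as f:
    tag = f.read().strip()
print(tag)  # "global_step450" -- the per-rank ZeRO shards for that step are not part of this commit
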
llava-llama-2-13b-chat-forecasting-finetune/pytorch_model-00001-of-00003.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f35c573bc8e9dd0b0881f63e09cf960c5046dd113f343fb0088e969594538d15
+ size 9948726510
llava-llama-2-13b-chat-forecasting-finetune/pytorch_model-00002-of-00003.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bda7e1ef460d292c1ef88fa0e2686323dd69ca5531da62c491a6a1c51e4756d0
+ size 9904162976
llava-llama-2-13b-chat-forecasting-finetune/pytorch_model-00003-of-00003.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:279fb8b5a34da2ba5b3bb6d666240a3b7412d67199cbb459aadca5c8fec57e85
+ size 6795987143
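
The three shards above are tied together by pytorch_model.bin.index.json, whose weight_map (shown next) records which .bin file holds each tensor. A minimal sketch of using that index to locate and inspect a single weight, assuming the files sit under the directory shown in this commit:

import json
import torch

model_dir = "llava-llama-2-13b-chat-forecasting-finetune"
with open(f"{model_dir}/pytorch_model.bin.index.json") as f:
    index = json.load(f)

name = "model.layers.0.self_attn.q_proj.weight"   # any key from the weight_map
shard = index["weight_map"][name]                 # e.g. "pytorch_model-00001-of-00003.bin"
state = torch.load(f"{model_dir}/{shard}", map_location="cpu")
print(name, tuple(state[name].shape))             # (5120, 5120) for this 13B q_proj
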
llava-llama-2-13b-chat-forecasting-finetune/pytorch_model.bin.index.json ADDED
@@ -0,0 +1,803 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 26648589312
4
+ },
5
+ "weight_map": {
6
+ "lm_head.weight": "pytorch_model-00003-of-00003.bin",
7
+ "model.embed_tokens.weight": "pytorch_model-00001-of-00003.bin",
8
+ "model.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
9
+ "model.layers.0.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
10
+ "model.layers.0.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
11
+ "model.layers.0.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
12
+ "model.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
13
+ "model.layers.0.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
14
+ "model.layers.0.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
15
+ "model.layers.0.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
16
+ "model.layers.0.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin",
17
+ "model.layers.0.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
18
+ "model.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
19
+ "model.layers.1.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
20
+ "model.layers.1.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
21
+ "model.layers.1.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
22
+ "model.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
23
+ "model.layers.1.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
24
+ "model.layers.1.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
25
+ "model.layers.1.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
26
+ "model.layers.1.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin",
27
+ "model.layers.1.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
28
+ "model.layers.10.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
29
+ "model.layers.10.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
30
+ "model.layers.10.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
31
+ "model.layers.10.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
32
+ "model.layers.10.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
33
+ "model.layers.10.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
34
+ "model.layers.10.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
35
+ "model.layers.10.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
36
+ "model.layers.10.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin",
37
+ "model.layers.10.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
38
+ "model.layers.11.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
39
+ "model.layers.11.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
40
+ "model.layers.11.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
41
+ "model.layers.11.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
42
+ "model.layers.11.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
43
+ "model.layers.11.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
44
+ "model.layers.11.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
45
+ "model.layers.11.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
46
+ "model.layers.11.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin",
47
+ "model.layers.11.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
48
+ "model.layers.12.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
49
+ "model.layers.12.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
50
+ "model.layers.12.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
51
+ "model.layers.12.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
52
+ "model.layers.12.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
53
+ "model.layers.12.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
54
+ "model.layers.12.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
55
+ "model.layers.12.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
56
+ "model.layers.12.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin",
57
+ "model.layers.12.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
58
+ "model.layers.13.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
59
+ "model.layers.13.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
60
+ "model.layers.13.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
61
+ "model.layers.13.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
62
+ "model.layers.13.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
63
+ "model.layers.13.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
64
+ "model.layers.13.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
65
+ "model.layers.13.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
66
+ "model.layers.13.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin",
67
+ "model.layers.13.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
68
+ "model.layers.14.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
69
+ "model.layers.14.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
70
+ "model.layers.14.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
71
+ "model.layers.14.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
72
+ "model.layers.14.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
73
+ "model.layers.14.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
74
+ "model.layers.14.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
75
+ "model.layers.14.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
76
+ "model.layers.14.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin",
77
+ "model.layers.14.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
78
+ "model.layers.15.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
79
+ "model.layers.15.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
80
+ "model.layers.15.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
81
+ "model.layers.15.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
82
+ "model.layers.15.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
83
+ "model.layers.15.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
84
+ "model.layers.15.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
85
+ "model.layers.15.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
86
+ "model.layers.15.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin",
87
+ "model.layers.15.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
88
+ "model.layers.16.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
89
+ "model.layers.16.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
90
+ "model.layers.16.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
91
+ "model.layers.16.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
92
+ "model.layers.16.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
93
+ "model.layers.16.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
94
+ "model.layers.16.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
95
+ "model.layers.16.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
96
+ "model.layers.16.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin",
97
+ "model.layers.16.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
98
+ "model.layers.17.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
99
+ "model.layers.17.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
100
+ "model.layers.17.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
101
+ "model.layers.17.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
102
+ "model.layers.17.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
103
+ "model.layers.17.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
104
+ "model.layers.17.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
105
+ "model.layers.17.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
106
+ "model.layers.17.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin",
107
+ "model.layers.17.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
108
+ "model.layers.18.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
109
+ "model.layers.18.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
110
+ "model.layers.18.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
111
+ "model.layers.18.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
112
+ "model.layers.18.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
113
+ "model.layers.18.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
114
+ "model.layers.18.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
115
+ "model.layers.18.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
116
+ "model.layers.18.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin",
117
+ "model.layers.18.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
118
+ "model.layers.19.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
119
+ "model.layers.19.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
120
+ "model.layers.19.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
121
+ "model.layers.19.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
122
+ "model.layers.19.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
123
+ "model.layers.19.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
124
+ "model.layers.19.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
125
+ "model.layers.19.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
126
+ "model.layers.19.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin",
127
+ "model.layers.19.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
128
+ "model.layers.2.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
129
+ "model.layers.2.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
130
+ "model.layers.2.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
131
+ "model.layers.2.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
132
+ "model.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
133
+ "model.layers.2.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
134
+ "model.layers.2.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
135
+ "model.layers.2.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
136
+ "model.layers.2.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin",
137
+ "model.layers.2.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
138
+ "model.layers.20.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
139
+ "model.layers.20.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
140
+ "model.layers.20.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
141
+ "model.layers.20.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
142
+ "model.layers.20.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
143
+ "model.layers.20.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
144
+ "model.layers.20.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
145
+ "model.layers.20.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
146
+ "model.layers.20.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin",
147
+ "model.layers.20.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
148
+ "model.layers.21.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
149
+ "model.layers.21.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
150
+ "model.layers.21.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
151
+ "model.layers.21.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
152
+ "model.layers.21.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
153
+ "model.layers.21.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
154
+ "model.layers.21.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
155
+ "model.layers.21.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
156
+ "model.layers.21.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin",
157
+ "model.layers.21.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
158
+ "model.layers.22.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
159
+ "model.layers.22.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
160
+ "model.layers.22.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
161
+ "model.layers.22.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
162
+ "model.layers.22.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
163
+ "model.layers.22.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
164
+ "model.layers.22.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
165
+ "model.layers.22.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
166
+ "model.layers.22.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin",
167
+ "model.layers.22.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
168
+ "model.layers.23.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
169
+ "model.layers.23.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
170
+ "model.layers.23.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
171
+ "model.layers.23.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
172
+ "model.layers.23.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
173
+ "model.layers.23.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
174
+ "model.layers.23.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
175
+ "model.layers.23.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
176
+ "model.layers.23.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin",
177
+ "model.layers.23.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
178
+ "model.layers.24.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
179
+ "model.layers.24.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
180
+ "model.layers.24.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
181
+ "model.layers.24.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
182
+ "model.layers.24.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
183
+ "model.layers.24.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
184
+ "model.layers.24.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
185
+ "model.layers.24.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
186
+ "model.layers.24.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin",
187
+ "model.layers.24.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
188
+ "model.layers.25.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
189
+ "model.layers.25.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
190
+ "model.layers.25.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
191
+ "model.layers.25.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
192
+ "model.layers.25.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
193
+ "model.layers.25.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
194
+ "model.layers.25.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
195
+ "model.layers.25.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
196
+ "model.layers.25.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin",
197
+ "model.layers.25.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
198
+ "model.layers.26.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
199
+ "model.layers.26.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
200
+ "model.layers.26.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
201
+ "model.layers.26.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
202
+ "model.layers.26.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
203
+ "model.layers.26.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
204
+ "model.layers.26.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
205
+ "model.layers.26.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
206
+ "model.layers.26.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin",
207
+ "model.layers.26.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
208
+ "model.layers.27.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
209
+ "model.layers.27.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
210
+ "model.layers.27.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
211
+ "model.layers.27.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
212
+ "model.layers.27.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
213
+ "model.layers.27.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
214
+ "model.layers.27.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
215
+ "model.layers.27.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
216
+ "model.layers.27.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin",
217
+ "model.layers.27.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
218
+ "model.layers.28.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
219
+ "model.layers.28.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
220
+ "model.layers.28.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
221
+ "model.layers.28.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
222
+ "model.layers.28.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
223
+ "model.layers.28.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
224
+ "model.layers.28.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
225
+ "model.layers.28.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
226
+ "model.layers.28.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin",
227
+ "model.layers.28.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
228
+ "model.layers.29.input_layernorm.weight": "pytorch_model-00002-of-00003.bin",
229
+ "model.layers.29.mlp.down_proj.weight": "pytorch_model-00002-of-00003.bin",
230
+ "model.layers.29.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
231
+ "model.layers.29.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
232
+ "model.layers.29.post_attention_layernorm.weight": "pytorch_model-00002-of-00003.bin",
233
+ "model.layers.29.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
234
+ "model.layers.29.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
235
+ "model.layers.29.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
236
+ "model.layers.29.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin",
237
+ "model.layers.29.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
238
+ "model.layers.3.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
239
+ "model.layers.3.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
240
+ "model.layers.3.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
241
+ "model.layers.3.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
242
+ "model.layers.3.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
243
+ "model.layers.3.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
244
+ "model.layers.3.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
245
+ "model.layers.3.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
246
+ "model.layers.3.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin",
247
+ "model.layers.3.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
248
+ "model.layers.30.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
249
+ "model.layers.30.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
250
+ "model.layers.30.mlp.gate_proj.weight": "pytorch_model-00002-of-00003.bin",
251
+ "model.layers.30.mlp.up_proj.weight": "pytorch_model-00002-of-00003.bin",
252
+ "model.layers.30.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
253
+ "model.layers.30.self_attn.k_proj.weight": "pytorch_model-00002-of-00003.bin",
254
+ "model.layers.30.self_attn.o_proj.weight": "pytorch_model-00002-of-00003.bin",
255
+ "model.layers.30.self_attn.q_proj.weight": "pytorch_model-00002-of-00003.bin",
256
+ "model.layers.30.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00003.bin",
257
+ "model.layers.30.self_attn.v_proj.weight": "pytorch_model-00002-of-00003.bin",
258
+ "model.layers.31.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
259
+ "model.layers.31.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
260
+ "model.layers.31.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
261
+ "model.layers.31.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
262
+ "model.layers.31.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
263
+ "model.layers.31.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
264
+ "model.layers.31.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
265
+ "model.layers.31.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
266
+ "model.layers.31.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00003.bin",
267
+ "model.layers.31.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
268
+ "model.layers.32.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
269
+ "model.layers.32.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
270
+ "model.layers.32.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
271
+ "model.layers.32.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
272
+ "model.layers.32.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
273
+ "model.layers.32.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
274
+ "model.layers.32.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
275
+ "model.layers.32.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
276
+ "model.layers.32.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00003.bin",
277
+ "model.layers.32.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
278
+ "model.layers.33.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
279
+ "model.layers.33.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
280
+ "model.layers.33.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
281
+ "model.layers.33.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
282
+ "model.layers.33.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
283
+ "model.layers.33.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
284
+ "model.layers.33.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
285
+ "model.layers.33.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
286
+ "model.layers.33.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00003.bin",
287
+ "model.layers.33.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
288
+ "model.layers.34.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
289
+ "model.layers.34.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
290
+ "model.layers.34.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
291
+ "model.layers.34.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
292
+ "model.layers.34.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
293
+ "model.layers.34.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
294
+ "model.layers.34.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
295
+ "model.layers.34.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
296
+ "model.layers.34.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00003.bin",
297
+ "model.layers.34.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
298
+ "model.layers.35.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
299
+ "model.layers.35.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
300
+ "model.layers.35.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
301
+ "model.layers.35.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
302
+ "model.layers.35.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
303
+ "model.layers.35.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
304
+ "model.layers.35.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
305
+ "model.layers.35.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
306
+ "model.layers.35.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00003.bin",
307
+ "model.layers.35.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
308
+ "model.layers.36.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
309
+ "model.layers.36.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
310
+ "model.layers.36.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
311
+ "model.layers.36.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
312
+ "model.layers.36.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
313
+ "model.layers.36.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
314
+ "model.layers.36.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
315
+ "model.layers.36.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
316
+ "model.layers.36.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00003.bin",
317
+ "model.layers.36.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
318
+ "model.layers.37.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
319
+ "model.layers.37.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
320
+ "model.layers.37.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
321
+ "model.layers.37.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
322
+ "model.layers.37.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
323
+ "model.layers.37.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
324
+ "model.layers.37.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
325
+ "model.layers.37.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
326
+ "model.layers.37.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00003.bin",
327
+ "model.layers.37.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
328
+ "model.layers.38.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
329
+ "model.layers.38.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
330
+ "model.layers.38.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
331
+ "model.layers.38.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
332
+ "model.layers.38.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
333
+ "model.layers.38.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
334
+ "model.layers.38.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
335
+ "model.layers.38.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
336
+ "model.layers.38.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00003.bin",
337
+ "model.layers.38.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
338
+ "model.layers.39.input_layernorm.weight": "pytorch_model-00003-of-00003.bin",
339
+ "model.layers.39.mlp.down_proj.weight": "pytorch_model-00003-of-00003.bin",
340
+ "model.layers.39.mlp.gate_proj.weight": "pytorch_model-00003-of-00003.bin",
341
+ "model.layers.39.mlp.up_proj.weight": "pytorch_model-00003-of-00003.bin",
342
+ "model.layers.39.post_attention_layernorm.weight": "pytorch_model-00003-of-00003.bin",
343
+ "model.layers.39.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
344
+ "model.layers.39.self_attn.o_proj.weight": "pytorch_model-00003-of-00003.bin",
345
+ "model.layers.39.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
346
+ "model.layers.39.self_attn.rotary_emb.inv_freq": "pytorch_model-00003-of-00003.bin",
347
+ "model.layers.39.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
348
+ "model.layers.4.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
349
+ "model.layers.4.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
350
+ "model.layers.4.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
351
+ "model.layers.4.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
352
+ "model.layers.4.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
353
+ "model.layers.4.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
354
+ "model.layers.4.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
355
+ "model.layers.4.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
356
+ "model.layers.4.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin",
357
+ "model.layers.4.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
358
+ "model.layers.5.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
359
+ "model.layers.5.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
360
+ "model.layers.5.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
361
+ "model.layers.5.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
362
+ "model.layers.5.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
363
+ "model.layers.5.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
364
+ "model.layers.5.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
365
+ "model.layers.5.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
366
+ "model.layers.5.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin",
367
+ "model.layers.5.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
368
+ "model.layers.6.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
369
+ "model.layers.6.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
370
+ "model.layers.6.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
371
+ "model.layers.6.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
372
+ "model.layers.6.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
373
+ "model.layers.6.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
374
+ "model.layers.6.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
375
+ "model.layers.6.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
376
+ "model.layers.6.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin",
377
+ "model.layers.6.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
378
+ "model.layers.7.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
379
+ "model.layers.7.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
380
+ "model.layers.7.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
381
+ "model.layers.7.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
382
+ "model.layers.7.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
383
+ "model.layers.7.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
384
+ "model.layers.7.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
385
+ "model.layers.7.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
386
+ "model.layers.7.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin",
387
+ "model.layers.7.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
388
+ "model.layers.8.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
389
+ "model.layers.8.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
390
+ "model.layers.8.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
391
+ "model.layers.8.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
392
+ "model.layers.8.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
393
+ "model.layers.8.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
394
+ "model.layers.8.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
395
+ "model.layers.8.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
396
+ "model.layers.8.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin",
397
+ "model.layers.8.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
398
+ "model.layers.9.input_layernorm.weight": "pytorch_model-00001-of-00003.bin",
399
+ "model.layers.9.mlp.down_proj.weight": "pytorch_model-00001-of-00003.bin",
400
+ "model.layers.9.mlp.gate_proj.weight": "pytorch_model-00001-of-00003.bin",
401
+ "model.layers.9.mlp.up_proj.weight": "pytorch_model-00001-of-00003.bin",
402
+ "model.layers.9.post_attention_layernorm.weight": "pytorch_model-00001-of-00003.bin",
403
+ "model.layers.9.self_attn.k_proj.weight": "pytorch_model-00001-of-00003.bin",
404
+ "model.layers.9.self_attn.o_proj.weight": "pytorch_model-00001-of-00003.bin",
405
+ "model.layers.9.self_attn.q_proj.weight": "pytorch_model-00001-of-00003.bin",
406
+ "model.layers.9.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00003.bin",
407
+ "model.layers.9.self_attn.v_proj.weight": "pytorch_model-00001-of-00003.bin",
408
+ "model.mm_projector.bias": "pytorch_model-00003-of-00003.bin",
409
+ "model.mm_projector.weight": "pytorch_model-00003-of-00003.bin",
410
+ "model.norm.weight": "pytorch_model-00003-of-00003.bin",
411
+ "model.vision_tower.vision_tower.vision_model.embeddings.class_embedding": "pytorch_model-00003-of-00003.bin",
412
+ "model.vision_tower.vision_tower.vision_model.embeddings.patch_embedding.weight": "pytorch_model-00003-of-00003.bin",
413
+ "model.vision_tower.vision_tower.vision_model.embeddings.position_embedding.weight": "pytorch_model-00003-of-00003.bin",
414
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
415
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
416
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
417
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
418
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
419
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
420
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
421
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
422
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
423
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
424
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
425
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
426
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
427
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
428
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
429
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
430
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
431
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
432
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
433
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
434
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
435
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
436
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
437
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
438
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
439
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
440
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
441
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
442
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
443
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
444
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
445
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
446
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
447
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
448
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
449
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
450
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
451
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
452
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
453
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
454
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
455
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
456
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
457
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
458
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
459
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
460
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
461
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
462
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
463
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
464
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
465
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
466
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
467
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
468
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
469
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
470
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
471
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
472
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
473
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
474
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
475
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
476
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
477
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
478
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
479
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
480
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
481
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
482
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
483
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
484
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
485
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
486
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
487
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
488
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
489
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
490
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
491
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
492
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
493
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
494
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
495
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
496
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
497
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
498
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
499
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
500
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
501
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
502
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
503
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
504
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
505
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
506
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
507
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
508
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
509
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
510
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
511
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
512
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
513
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
514
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
515
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
516
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
517
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
518
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
519
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
520
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
521
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
522
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
523
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
524
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
525
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
526
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
527
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
528
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
529
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
530
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
531
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
532
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
533
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
534
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
535
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
536
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
537
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
538
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
539
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
540
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
541
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
542
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
543
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
544
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
545
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
546
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
547
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
548
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
549
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
550
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
551
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
552
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
553
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
554
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
555
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
556
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
557
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
558
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
559
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
560
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
561
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
562
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
563
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
564
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
565
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
566
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
567
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
568
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
569
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
570
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
571
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
572
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
573
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
574
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
575
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
576
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
577
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
578
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
579
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
580
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
581
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
582
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
583
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
584
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
585
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
586
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
587
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
588
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
589
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
590
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
591
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
592
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
593
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
594
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
595
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
596
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
597
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
598
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
599
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
600
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
601
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
602
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
603
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
604
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
605
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
606
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
607
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
608
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
609
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
610
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
611
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
612
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
613
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
614
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
615
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
616
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
617
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
618
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
619
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
620
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
621
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
622
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
623
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
624
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
625
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
626
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
627
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
628
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
629
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
630
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
631
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
632
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
633
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
634
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
635
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
636
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
637
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
638
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
639
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
640
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
641
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
642
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
643
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
644
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
645
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
646
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
647
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
648
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
649
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
650
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
651
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
652
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
653
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
654
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
655
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
656
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
657
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
658
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
659
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
660
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
661
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
662
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
663
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
664
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
665
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
666
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
667
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
668
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
669
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
670
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
671
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
672
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
673
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
674
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
675
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
676
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
677
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
678
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
679
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
680
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
681
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
682
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
683
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
684
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
685
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
686
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
687
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
688
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
689
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
690
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
691
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
692
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
693
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
694
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
695
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
696
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
697
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
698
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
699
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
700
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
701
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
702
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
703
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
704
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
705
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
706
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
707
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
708
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
709
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
710
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
711
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
712
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
713
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
714
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
715
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
716
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
717
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
718
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
719
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
720
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
721
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
722
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
723
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
724
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
725
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
726
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
727
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
728
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
729
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
730
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
731
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
732
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
733
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
734
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
735
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
736
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
737
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
738
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
739
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
740
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
741
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
742
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
743
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
744
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
745
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
746
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
747
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
748
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
749
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
750
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
751
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
752
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
753
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
754
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
755
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
756
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
757
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
758
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
759
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
760
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
761
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
762
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
763
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
764
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
765
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
766
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
767
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
768
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
769
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
770
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
771
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
772
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
773
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
774
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
775
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
776
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
777
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
778
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
779
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
780
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
781
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
782
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.bias": "pytorch_model-00003-of-00003.bin",
783
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.weight": "pytorch_model-00003-of-00003.bin",
784
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.bias": "pytorch_model-00003-of-00003.bin",
785
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.weight": "pytorch_model-00003-of-00003.bin",
786
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.bias": "pytorch_model-00003-of-00003.bin",
787
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.weight": "pytorch_model-00003-of-00003.bin",
788
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.bias": "pytorch_model-00003-of-00003.bin",
789
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.weight": "pytorch_model-00003-of-00003.bin",
790
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.bias": "pytorch_model-00003-of-00003.bin",
791
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.weight": "pytorch_model-00003-of-00003.bin",
792
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.bias": "pytorch_model-00003-of-00003.bin",
793
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.weight": "pytorch_model-00003-of-00003.bin",
794
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.bias": "pytorch_model-00003-of-00003.bin",
795
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.weight": "pytorch_model-00003-of-00003.bin",
796
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.bias": "pytorch_model-00003-of-00003.bin",
797
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.weight": "pytorch_model-00003-of-00003.bin",
798
+ "model.vision_tower.vision_tower.vision_model.post_layernorm.bias": "pytorch_model-00003-of-00003.bin",
799
+ "model.vision_tower.vision_tower.vision_model.post_layernorm.weight": "pytorch_model-00003-of-00003.bin",
800
+ "model.vision_tower.vision_tower.vision_model.pre_layrnorm.bias": "pytorch_model-00003-of-00003.bin",
801
+ "model.vision_tower.vision_tower.vision_model.pre_layrnorm.weight": "pytorch_model-00003-of-00003.bin"
802
+ }
803
+ }
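
The mappings above complete the "weight_map" of pytorch_model.bin.index.json: each parameter name points to the shard (pytorch_model-0000X-of-00003.bin) that stores it, and the whole CLIP vision tower lands in shard 3. The snippet below is a minimal sketch of resolving one tensor through that map; the local directory name is an assumption, and in practice transformers' from_pretrained() reads the index automatically.

import json
import os
import torch

ckpt_dir = "llava-llama-2-13b-chat-forecasting-finetune"  # assumed local path

# The index file has "metadata" and "weight_map" sections; the weight map is
# exactly the parameter-name -> shard-file dictionary shown in the diff above.
with open(os.path.join(ckpt_dir, "pytorch_model.bin.index.json")) as f:
    weight_map = json.load(f)["weight_map"]

def load_param(name):
    # Load the shard that owns this parameter and pull out the single tensor.
    # (Loading a whole multi-GB shard for one tensor is only for illustration.)
    shard = torch.load(os.path.join(ckpt_dir, weight_map[name]), map_location="cpu")
    return shard[name]

w = load_param("model.vision_tower.vision_tower.vision_model.post_layernorm.weight")
print(w.shape)
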
llava-llama-2-13b-chat-forecasting-finetune/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4c1d4aab692d41c18c919a673e506a5264ad9de9ff132b0de5dab52510157c75
3
+ size 21687
llava-llama-2-13b-chat-forecasting-finetune/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:72575f2b4522908157bba855533d15354ced6598ecdc09ca8ce5417fa99c9b92
3
+ size 21687
llava-llama-2-13b-chat-forecasting-finetune/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eabb43b1fac99cd8e34e58482a942b31a1855cd53120560e1428e8a2aae185e9
3
+ size 21687
llava-llama-2-13b-chat-forecasting-finetune/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cfd9568c2d39f428f86eb5a99beb6f19ac57f3f024ec4494b8cadd107d148298
3
+ size 21687
llava-llama-2-13b-chat-forecasting-finetune/rng_state_4.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:21ab6cb42d703108752ec781af0a5227b721321c411bb7f7bca00b6ec4e15324
3
+ size 21687
llava-llama-2-13b-chat-forecasting-finetune/rng_state_5.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c9fe828193c13a4a1adccaf24735d85428cb925bfa6204603f3a0a4a34c8cec9
3
+ size 21687
llava-llama-2-13b-chat-forecasting-finetune/rng_state_6.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:36e15c4dcb706b97a74d0f8259d6466dd2e46e60fd71dcb2c6d99e1e9ff2f204
3
+ size 21687
llava-llama-2-13b-chat-forecasting-finetune/rng_state_7.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7a467ddda52bc75625e56adc924121f89adb1e65a5733eec05f6284586a9a926
3
+ size 21687
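
The eight rng_state_*.pth files above (one per training process) are tracked with Git LFS, so the commit only records 3-line pointer files: the LFS spec version, the sha256 oid of the actual blob, and its size in bytes. A small, purely illustrative parser for such a pointer (Python 3.9+ for str.removeprefix):

def parse_lfs_pointer(text: str) -> dict:
    # Each pointer line is "<key> <value>"; split once on the first space.
    fields = dict(line.partition(" ")[::2] for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:4c1d4aab692d41c18c919a673e506a5264ad9de9ff132b0de5dab52510157c75
size 21687"""
print(parse_lfs_pointer(pointer))  # oid/size taken from rng_state_0.pth above
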
llava-llama-2-13b-chat-forecasting-finetune/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "</s>",
11
+ "lstrip": false,
12
+ "normalized": true,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": "<unk>",
17
+ "unk_token": {
18
+ "content": "<unk>",
19
+ "lstrip": false,
20
+ "normalized": true,
21
+ "rstrip": false,
22
+ "single_word": false
23
+ }
24
+ }
llava-llama-2-13b-chat-forecasting-finetune/tokenizer.model ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
3
+ size 499723
llava-llama-2-13b-chat-forecasting-finetune/tokenizer_config.json ADDED
@@ -0,0 +1,35 @@
1
+ {
2
+ "add_bos_token": true,
3
+ "add_eos_token": false,
4
+ "bos_token": {
5
+ "__type": "AddedToken",
6
+ "content": "<s>",
7
+ "lstrip": false,
8
+ "normalized": true,
9
+ "rstrip": false,
10
+ "single_word": false
11
+ },
12
+ "clean_up_tokenization_spaces": false,
13
+ "eos_token": {
14
+ "__type": "AddedToken",
15
+ "content": "</s>",
16
+ "lstrip": false,
17
+ "normalized": true,
18
+ "rstrip": false,
19
+ "single_word": false
20
+ },
21
+ "legacy": true,
22
+ "model_max_length": 2048,
23
+ "pad_token": null,
24
+ "padding_side": "right",
25
+ "sp_model_kwargs": {},
26
+ "tokenizer_class": "LlamaTokenizer",
27
+ "unk_token": {
28
+ "__type": "AddedToken",
29
+ "content": "<unk>",
30
+ "lstrip": false,
31
+ "normalized": true,
32
+ "rstrip": false,
33
+ "single_word": false
34
+ }
35
+ }
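
Taken together, special_tokens_map.json and tokenizer_config.json above describe a standard slow LLaMA SentencePiece tokenizer: <s>/</s>/<unk> special tokens, <unk> reused as the padding token, right-side padding, and a 2048-token model_max_length. A minimal loading sketch (the local directory name is an assumption):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "llava-llama-2-13b-chat-forecasting-finetune",  # assumed local checkpoint dir
    use_fast=False,  # tokenizer_config.json names the slow LlamaTokenizer class
)
print(tokenizer.model_max_length)  # 2048, from tokenizer_config.json
print(tokenizer.pad_token)         # expected "<unk>", via special_tokens_map.json
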
llava-llama-2-13b-chat-forecasting-finetune/trainer_state.json ADDED
@@ -0,0 +1,2716 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 2.7439024390243905,
5
+ "global_step": 450,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 0.01,
12
+ "learning_rate": 0.0,
13
+ "loss": 1.7617,
14
+ "step": 1
15
+ },
16
+ {
17
+ "epoch": 0.01,
18
+ "learning_rate": 4.6275642631951835e-06,
19
+ "loss": 0.9497,
20
+ "step": 2
21
+ },
22
+ {
23
+ "epoch": 0.02,
24
+ "learning_rate": 7.3345158268416935e-06,
25
+ "loss": 0.9375,
26
+ "step": 3
27
+ },
28
+ {
29
+ "epoch": 0.02,
30
+ "learning_rate": 9.255128526390367e-06,
31
+ "loss": 0.6309,
32
+ "step": 4
33
+ },
34
+ {
35
+ "epoch": 0.03,
36
+ "learning_rate": 1.0744871473609633e-05,
37
+ "loss": 0.478,
38
+ "step": 5
39
+ },
40
+ {
41
+ "epoch": 0.04,
42
+ "learning_rate": 1.1962080090036879e-05,
43
+ "loss": 0.5774,
44
+ "step": 6
45
+ },
46
+ {
47
+ "epoch": 0.04,
48
+ "learning_rate": 1.299121531141887e-05,
49
+ "loss": 0.4492,
50
+ "step": 7
51
+ },
52
+ {
53
+ "epoch": 0.05,
54
+ "learning_rate": 1.388269278958555e-05,
55
+ "loss": 0.4519,
56
+ "step": 8
57
+ },
58
+ {
59
+ "epoch": 0.05,
60
+ "learning_rate": 1.4669031653683387e-05,
61
+ "loss": 0.4429,
62
+ "step": 9
63
+ },
64
+ {
65
+ "epoch": 0.06,
66
+ "learning_rate": 1.537243573680482e-05,
67
+ "loss": 0.437,
68
+ "step": 10
69
+ },
70
+ {
71
+ "epoch": 0.07,
72
+ "learning_rate": 1.600874212937343e-05,
73
+ "loss": 0.4043,
74
+ "step": 11
75
+ },
76
+ {
77
+ "epoch": 0.07,
78
+ "learning_rate": 1.6589644353232063e-05,
79
+ "loss": 0.3865,
80
+ "step": 12
81
+ },
82
+ {
83
+ "epoch": 0.08,
84
+ "learning_rate": 1.712402259777778e-05,
85
+ "loss": 0.3904,
86
+ "step": 13
87
+ },
88
+ {
89
+ "epoch": 0.09,
90
+ "learning_rate": 1.7618779574614054e-05,
91
+ "loss": 0.3884,
92
+ "step": 14
93
+ },
94
+ {
95
+ "epoch": 0.09,
96
+ "learning_rate": 1.8079387300451327e-05,
97
+ "loss": 0.3555,
98
+ "step": 15
99
+ },
100
+ {
101
+ "epoch": 0.1,
102
+ "learning_rate": 1.8510257052780734e-05,
103
+ "loss": 0.3621,
104
+ "step": 16
105
+ },
106
+ {
107
+ "epoch": 0.1,
108
+ "learning_rate": 1.891499697130832e-05,
109
+ "loss": 0.3779,
110
+ "step": 17
111
+ },
112
+ {
113
+ "epoch": 0.11,
114
+ "learning_rate": 1.929659591687857e-05,
115
+ "loss": 0.3604,
116
+ "step": 18
117
+ },
118
+ {
119
+ "epoch": 0.12,
120
+ "learning_rate": 1.9657557553855117e-05,
121
+ "loss": 0.3112,
122
+ "step": 19
123
+ },
124
+ {
125
+ "epoch": 0.12,
126
+ "learning_rate": 2e-05,
127
+ "loss": 0.3611,
128
+ "step": 20
129
+ },
130
+ {
131
+ "epoch": 0.13,
132
+ "learning_rate": 2e-05,
133
+ "loss": 0.3394,
134
+ "step": 21
135
+ },
136
+ {
137
+ "epoch": 0.13,
138
+ "learning_rate": 2e-05,
139
+ "loss": 0.3376,
140
+ "step": 22
141
+ },
142
+ {
143
+ "epoch": 0.14,
144
+ "learning_rate": 2e-05,
145
+ "loss": 0.3569,
146
+ "step": 23
147
+ },
148
+ {
149
+ "epoch": 0.15,
150
+ "learning_rate": 2e-05,
151
+ "loss": 0.3665,
152
+ "step": 24
153
+ },
154
+ {
155
+ "epoch": 0.15,
156
+ "learning_rate": 2e-05,
157
+ "loss": 0.3387,
158
+ "step": 25
159
+ },
160
+ {
161
+ "epoch": 0.16,
162
+ "learning_rate": 2e-05,
163
+ "loss": 0.3163,
164
+ "step": 26
165
+ },
166
+ {
167
+ "epoch": 0.16,
168
+ "learning_rate": 2e-05,
169
+ "loss": 0.3247,
170
+ "step": 27
171
+ },
172
+ {
173
+ "epoch": 0.17,
174
+ "learning_rate": 2e-05,
175
+ "loss": 0.2898,
176
+ "step": 28
177
+ },
178
+ {
179
+ "epoch": 0.18,
180
+ "learning_rate": 2e-05,
181
+ "loss": 0.3478,
182
+ "step": 29
183
+ },
184
+ {
185
+ "epoch": 0.18,
186
+ "learning_rate": 2e-05,
187
+ "loss": 0.3394,
188
+ "step": 30
189
+ },
190
+ {
191
+ "epoch": 0.19,
192
+ "learning_rate": 2e-05,
193
+ "loss": 0.3236,
194
+ "step": 31
195
+ },
196
+ {
197
+ "epoch": 0.2,
198
+ "learning_rate": 2e-05,
199
+ "loss": 0.328,
200
+ "step": 32
201
+ },
202
+ {
203
+ "epoch": 0.2,
204
+ "learning_rate": 2e-05,
205
+ "loss": 0.3147,
206
+ "step": 33
207
+ },
208
+ {
209
+ "epoch": 0.21,
210
+ "learning_rate": 2e-05,
211
+ "loss": 0.3195,
212
+ "step": 34
213
+ },
214
+ {
215
+ "epoch": 0.21,
216
+ "learning_rate": 2e-05,
217
+ "loss": 0.309,
218
+ "step": 35
219
+ },
220
+ {
221
+ "epoch": 0.22,
222
+ "learning_rate": 2e-05,
223
+ "loss": 0.3364,
224
+ "step": 36
225
+ },
226
+ {
227
+ "epoch": 0.23,
228
+ "learning_rate": 2e-05,
229
+ "loss": 0.3057,
230
+ "step": 37
231
+ },
232
+ {
233
+ "epoch": 0.23,
234
+ "learning_rate": 2e-05,
235
+ "loss": 0.3101,
236
+ "step": 38
237
+ },
238
+ {
239
+ "epoch": 0.24,
240
+ "learning_rate": 2e-05,
241
+ "loss": 0.356,
242
+ "step": 39
243
+ },
244
+ {
245
+ "epoch": 0.24,
246
+ "learning_rate": 2e-05,
247
+ "loss": 0.3015,
248
+ "step": 40
249
+ },
250
+ {
251
+ "epoch": 0.25,
252
+ "learning_rate": 2e-05,
253
+ "loss": 0.3142,
254
+ "step": 41
255
+ },
256
+ {
257
+ "epoch": 0.26,
258
+ "learning_rate": 2e-05,
259
+ "loss": 0.3322,
260
+ "step": 42
261
+ },
262
+ {
263
+ "epoch": 0.26,
264
+ "learning_rate": 2e-05,
265
+ "loss": 0.3276,
266
+ "step": 43
267
+ },
268
+ {
269
+ "epoch": 0.27,
270
+ "learning_rate": 2e-05,
271
+ "loss": 0.363,
272
+ "step": 44
273
+ },
274
+ {
275
+ "epoch": 0.27,
276
+ "learning_rate": 2e-05,
277
+ "loss": 0.3447,
278
+ "step": 45
279
+ },
280
+ {
281
+ "epoch": 0.28,
282
+ "learning_rate": 2e-05,
283
+ "loss": 0.3438,
284
+ "step": 46
285
+ },
286
+ {
287
+ "epoch": 0.29,
288
+ "learning_rate": 2e-05,
289
+ "loss": 0.3313,
290
+ "step": 47
291
+ },
292
+ {
293
+ "epoch": 0.29,
294
+ "learning_rate": 2e-05,
295
+ "loss": 0.2706,
296
+ "step": 48
297
+ },
298
+ {
299
+ "epoch": 0.3,
300
+ "learning_rate": 2e-05,
301
+ "loss": 0.3207,
302
+ "step": 49
303
+ },
304
+ {
305
+ "epoch": 0.3,
306
+ "learning_rate": 2e-05,
307
+ "loss": 0.2988,
308
+ "step": 50
309
+ },
310
+ {
311
+ "epoch": 0.31,
312
+ "learning_rate": 2e-05,
313
+ "loss": 0.324,
314
+ "step": 51
315
+ },
316
+ {
317
+ "epoch": 0.32,
318
+ "learning_rate": 2e-05,
319
+ "loss": 0.343,
320
+ "step": 52
321
+ },
322
+ {
323
+ "epoch": 0.32,
324
+ "learning_rate": 2e-05,
325
+ "loss": 0.2748,
326
+ "step": 53
327
+ },
328
+ {
329
+ "epoch": 0.33,
330
+ "learning_rate": 2e-05,
331
+ "loss": 0.2943,
332
+ "step": 54
333
+ },
334
+ {
335
+ "epoch": 0.34,
336
+ "learning_rate": 2e-05,
337
+ "loss": 0.3333,
338
+ "step": 55
339
+ },
340
+ {
341
+ "epoch": 0.34,
342
+ "learning_rate": 2e-05,
343
+ "loss": 0.3104,
344
+ "step": 56
345
+ },
346
+ {
347
+ "epoch": 0.35,
348
+ "learning_rate": 2e-05,
349
+ "loss": 0.3447,
350
+ "step": 57
351
+ },
352
+ {
353
+ "epoch": 0.35,
354
+ "learning_rate": 2e-05,
355
+ "loss": 0.3418,
356
+ "step": 58
357
+ },
358
+ {
359
+ "epoch": 0.36,
360
+ "learning_rate": 2e-05,
361
+ "loss": 0.3175,
362
+ "step": 59
363
+ },
364
+ {
365
+ "epoch": 0.37,
366
+ "learning_rate": 2e-05,
367
+ "loss": 0.3259,
368
+ "step": 60
369
+ },
370
+ {
371
+ "epoch": 0.37,
372
+ "learning_rate": 2e-05,
373
+ "loss": 0.2996,
374
+ "step": 61
375
+ },
376
+ {
377
+ "epoch": 0.38,
378
+ "learning_rate": 2e-05,
379
+ "loss": 0.3335,
380
+ "step": 62
381
+ },
382
+ {
383
+ "epoch": 0.38,
384
+ "learning_rate": 2e-05,
385
+ "loss": 0.314,
386
+ "step": 63
387
+ },
388
+ {
389
+ "epoch": 0.39,
390
+ "learning_rate": 2e-05,
391
+ "loss": 0.3087,
392
+ "step": 64
393
+ },
394
+ {
395
+ "epoch": 0.4,
396
+ "learning_rate": 2e-05,
397
+ "loss": 0.3124,
398
+ "step": 65
399
+ },
400
+ {
401
+ "epoch": 0.4,
402
+ "learning_rate": 2e-05,
403
+ "loss": 0.321,
404
+ "step": 66
405
+ },
406
+ {
407
+ "epoch": 0.41,
408
+ "learning_rate": 2e-05,
409
+ "loss": 0.3059,
410
+ "step": 67
411
+ },
412
+ {
413
+ "epoch": 0.41,
414
+ "learning_rate": 2e-05,
415
+ "loss": 0.2977,
416
+ "step": 68
417
+ },
418
+ {
419
+ "epoch": 0.42,
420
+ "learning_rate": 2e-05,
421
+ "loss": 0.2971,
422
+ "step": 69
423
+ },
424
+ {
425
+ "epoch": 0.43,
426
+ "learning_rate": 2e-05,
427
+ "loss": 0.3499,
428
+ "step": 70
429
+ },
430
+ {
431
+ "epoch": 0.43,
432
+ "learning_rate": 2e-05,
433
+ "loss": 0.2977,
434
+ "step": 71
435
+ },
436
+ {
437
+ "epoch": 0.44,
438
+ "learning_rate": 2e-05,
439
+ "loss": 0.3418,
440
+ "step": 72
441
+ },
442
+ {
443
+ "epoch": 0.45,
444
+ "learning_rate": 2e-05,
445
+ "loss": 0.3479,
446
+ "step": 73
447
+ },
448
+ {
449
+ "epoch": 0.45,
450
+ "learning_rate": 2e-05,
451
+ "loss": 0.2861,
452
+ "step": 74
453
+ },
454
+ {
455
+ "epoch": 0.46,
456
+ "learning_rate": 2e-05,
457
+ "loss": 0.3472,
458
+ "step": 75
459
+ },
460
+ {
461
+ "epoch": 0.46,
462
+ "learning_rate": 2e-05,
463
+ "loss": 0.3213,
464
+ "step": 76
465
+ },
466
+ {
467
+ "epoch": 0.47,
468
+ "learning_rate": 2e-05,
469
+ "loss": 0.3031,
470
+ "step": 77
471
+ },
472
+ {
473
+ "epoch": 0.48,
474
+ "learning_rate": 2e-05,
475
+ "loss": 0.3098,
476
+ "step": 78
477
+ },
478
+ {
479
+ "epoch": 0.48,
480
+ "learning_rate": 2e-05,
481
+ "loss": 0.3196,
482
+ "step": 79
483
+ },
484
+ {
485
+ "epoch": 0.49,
486
+ "learning_rate": 2e-05,
487
+ "loss": 0.2961,
488
+ "step": 80
489
+ },
490
+ {
491
+ "epoch": 0.49,
492
+ "learning_rate": 2e-05,
493
+ "loss": 0.2742,
494
+ "step": 81
495
+ },
496
+ {
497
+ "epoch": 0.5,
498
+ "learning_rate": 2e-05,
499
+ "loss": 0.2969,
500
+ "step": 82
501
+ },
502
+ {
503
+ "epoch": 0.51,
504
+ "learning_rate": 2e-05,
505
+ "loss": 0.3085,
506
+ "step": 83
507
+ },
508
+ {
509
+ "epoch": 0.51,
510
+ "learning_rate": 2e-05,
511
+ "loss": 0.2861,
512
+ "step": 84
513
+ },
514
+ {
515
+ "epoch": 0.52,
516
+ "learning_rate": 2e-05,
517
+ "loss": 0.3313,
518
+ "step": 85
519
+ },
520
+ {
521
+ "epoch": 0.52,
522
+ "learning_rate": 2e-05,
523
+ "loss": 0.3047,
524
+ "step": 86
525
+ },
526
+ {
527
+ "epoch": 0.53,
528
+ "learning_rate": 2e-05,
529
+ "loss": 0.2556,
530
+ "step": 87
531
+ },
532
+ {
533
+ "epoch": 0.54,
534
+ "learning_rate": 2e-05,
535
+ "loss": 0.3018,
536
+ "step": 88
537
+ },
538
+ {
539
+ "epoch": 0.54,
540
+ "learning_rate": 2e-05,
541
+ "loss": 0.2803,
542
+ "step": 89
543
+ },
544
+ {
545
+ "epoch": 0.55,
546
+ "learning_rate": 2e-05,
547
+ "loss": 0.3021,
548
+ "step": 90
549
+ },
550
+ {
551
+ "epoch": 0.55,
552
+ "learning_rate": 2e-05,
553
+ "loss": 0.2819,
554
+ "step": 91
555
+ },
556
+ {
557
+ "epoch": 0.56,
558
+ "learning_rate": 2e-05,
559
+ "loss": 0.3086,
560
+ "step": 92
561
+ },
562
+ {
563
+ "epoch": 0.57,
564
+ "learning_rate": 2e-05,
565
+ "loss": 0.3086,
566
+ "step": 93
567
+ },
568
+ {
569
+ "epoch": 0.57,
570
+ "learning_rate": 2e-05,
571
+ "loss": 0.2765,
572
+ "step": 94
573
+ },
574
+ {
575
+ "epoch": 0.58,
576
+ "learning_rate": 2e-05,
577
+ "loss": 0.3156,
578
+ "step": 95
579
+ },
580
+ {
581
+ "epoch": 0.59,
582
+ "learning_rate": 2e-05,
583
+ "loss": 0.287,
584
+ "step": 96
585
+ },
586
+ {
587
+ "epoch": 0.59,
588
+ "learning_rate": 2e-05,
589
+ "loss": 0.3473,
590
+ "step": 97
591
+ },
592
+ {
593
+ "epoch": 0.6,
594
+ "learning_rate": 2e-05,
595
+ "loss": 0.293,
596
+ "step": 98
597
+ },
598
+ {
599
+ "epoch": 0.6,
600
+ "learning_rate": 2e-05,
601
+ "loss": 0.3049,
602
+ "step": 99
603
+ },
604
+ {
605
+ "epoch": 0.61,
606
+ "learning_rate": 2e-05,
607
+ "loss": 0.2904,
608
+ "step": 100
609
+ },
610
+ {
611
+ "epoch": 0.62,
612
+ "learning_rate": 2e-05,
613
+ "loss": 0.3564,
614
+ "step": 101
615
+ },
616
+ {
617
+ "epoch": 0.62,
618
+ "learning_rate": 2e-05,
619
+ "loss": 0.2704,
620
+ "step": 102
621
+ },
622
+ {
623
+ "epoch": 0.63,
624
+ "learning_rate": 2e-05,
625
+ "loss": 0.3119,
626
+ "step": 103
627
+ },
628
+ {
629
+ "epoch": 0.63,
630
+ "learning_rate": 2e-05,
631
+ "loss": 0.2975,
632
+ "step": 104
633
+ },
634
+ {
635
+ "epoch": 0.64,
636
+ "learning_rate": 2e-05,
637
+ "loss": 0.2811,
638
+ "step": 105
639
+ },
640
+ {
641
+ "epoch": 0.65,
642
+ "learning_rate": 2e-05,
643
+ "loss": 0.3073,
644
+ "step": 106
645
+ },
646
+ {
647
+ "epoch": 0.65,
648
+ "learning_rate": 2e-05,
649
+ "loss": 0.2983,
650
+ "step": 107
651
+ },
652
+ {
653
+ "epoch": 0.66,
654
+ "learning_rate": 2e-05,
655
+ "loss": 0.2913,
656
+ "step": 108
657
+ },
658
+ {
659
+ "epoch": 0.66,
660
+ "learning_rate": 2e-05,
661
+ "loss": 0.2657,
662
+ "step": 109
663
+ },
664
+ {
665
+ "epoch": 0.67,
666
+ "learning_rate": 2e-05,
667
+ "loss": 0.2823,
668
+ "step": 110
669
+ },
670
+ {
671
+ "epoch": 0.68,
672
+ "learning_rate": 2e-05,
673
+ "loss": 0.2972,
674
+ "step": 111
675
+ },
676
+ {
677
+ "epoch": 0.68,
678
+ "learning_rate": 2e-05,
679
+ "loss": 0.3135,
680
+ "step": 112
681
+ },
682
+ {
683
+ "epoch": 0.69,
684
+ "learning_rate": 2e-05,
685
+ "loss": 0.2676,
686
+ "step": 113
687
+ },
688
+ {
689
+ "epoch": 0.7,
690
+ "learning_rate": 2e-05,
691
+ "loss": 0.2899,
692
+ "step": 114
693
+ },
694
+ {
695
+ "epoch": 0.7,
696
+ "learning_rate": 2e-05,
697
+ "loss": 0.272,
698
+ "step": 115
699
+ },
700
+ {
701
+ "epoch": 0.71,
702
+ "learning_rate": 2e-05,
703
+ "loss": 0.3032,
704
+ "step": 116
705
+ },
706
+ {
707
+ "epoch": 0.71,
708
+ "learning_rate": 2e-05,
709
+ "loss": 0.3073,
710
+ "step": 117
711
+ },
712
+ {
713
+ "epoch": 0.72,
714
+ "learning_rate": 2e-05,
715
+ "loss": 0.3027,
716
+ "step": 118
717
+ },
718
+ {
719
+ "epoch": 0.73,
720
+ "learning_rate": 2e-05,
721
+ "loss": 0.2893,
722
+ "step": 119
723
+ },
724
+ {
725
+ "epoch": 0.73,
726
+ "learning_rate": 2e-05,
727
+ "loss": 0.3013,
728
+ "step": 120
729
+ },
730
+ {
731
+ "epoch": 0.74,
732
+ "learning_rate": 2e-05,
733
+ "loss": 0.2638,
734
+ "step": 121
735
+ },
736
+ {
737
+ "epoch": 0.74,
738
+ "learning_rate": 2e-05,
739
+ "loss": 0.2773,
740
+ "step": 122
741
+ },
742
+ {
743
+ "epoch": 0.75,
744
+ "learning_rate": 2e-05,
745
+ "loss": 0.2708,
746
+ "step": 123
747
+ },
748
+ {
749
+ "epoch": 0.76,
750
+ "learning_rate": 2e-05,
751
+ "loss": 0.2303,
752
+ "step": 124
753
+ },
754
+ {
755
+ "epoch": 0.76,
756
+ "learning_rate": 2e-05,
757
+ "loss": 0.2764,
758
+ "step": 125
759
+ },
760
+ {
761
+ "epoch": 0.77,
762
+ "learning_rate": 2e-05,
763
+ "loss": 0.2909,
764
+ "step": 126
765
+ },
766
+ {
767
+ "epoch": 0.77,
768
+ "learning_rate": 2e-05,
769
+ "loss": 0.2799,
770
+ "step": 127
771
+ },
772
+ {
773
+ "epoch": 0.78,
774
+ "learning_rate": 2e-05,
775
+ "loss": 0.2573,
776
+ "step": 128
777
+ },
778
+ {
779
+ "epoch": 0.79,
780
+ "learning_rate": 2e-05,
781
+ "loss": 0.2845,
782
+ "step": 129
783
+ },
784
+ {
785
+ "epoch": 0.79,
786
+ "learning_rate": 2e-05,
787
+ "loss": 0.2667,
788
+ "step": 130
789
+ },
790
+ {
791
+ "epoch": 0.8,
792
+ "learning_rate": 2e-05,
793
+ "loss": 0.2623,
794
+ "step": 131
795
+ },
796
+ {
797
+ "epoch": 0.8,
798
+ "learning_rate": 2e-05,
799
+ "loss": 0.2728,
800
+ "step": 132
801
+ },
802
+ {
803
+ "epoch": 0.81,
804
+ "learning_rate": 2e-05,
805
+ "loss": 0.3169,
806
+ "step": 133
807
+ },
808
+ {
809
+ "epoch": 0.82,
810
+ "learning_rate": 2e-05,
811
+ "loss": 0.2861,
812
+ "step": 134
813
+ },
814
+ {
815
+ "epoch": 0.82,
816
+ "learning_rate": 2e-05,
817
+ "loss": 0.2921,
818
+ "step": 135
819
+ },
820
+ {
821
+ "epoch": 0.83,
822
+ "learning_rate": 2e-05,
823
+ "loss": 0.304,
824
+ "step": 136
825
+ },
826
+ {
827
+ "epoch": 0.84,
828
+ "learning_rate": 2e-05,
829
+ "loss": 0.2804,
830
+ "step": 137
831
+ },
832
+ {
833
+ "epoch": 0.84,
834
+ "learning_rate": 2e-05,
835
+ "loss": 0.2739,
836
+ "step": 138
837
+ },
838
+ {
839
+ "epoch": 0.85,
840
+ "learning_rate": 2e-05,
841
+ "loss": 0.2738,
842
+ "step": 139
843
+ },
844
+ {
845
+ "epoch": 0.85,
846
+ "learning_rate": 2e-05,
847
+ "loss": 0.2675,
848
+ "step": 140
849
+ },
850
+ {
851
+ "epoch": 0.86,
852
+ "learning_rate": 2e-05,
853
+ "loss": 0.2942,
854
+ "step": 141
855
+ },
856
+ {
857
+ "epoch": 0.87,
858
+ "learning_rate": 2e-05,
859
+ "loss": 0.3074,
860
+ "step": 142
861
+ },
862
+ {
863
+ "epoch": 0.87,
864
+ "learning_rate": 2e-05,
865
+ "loss": 0.265,
866
+ "step": 143
867
+ },
868
+ {
869
+ "epoch": 0.88,
870
+ "learning_rate": 2e-05,
871
+ "loss": 0.2885,
872
+ "step": 144
873
+ },
874
+ {
875
+ "epoch": 0.88,
876
+ "learning_rate": 2e-05,
877
+ "loss": 0.3245,
878
+ "step": 145
879
+ },
880
+ {
881
+ "epoch": 0.89,
882
+ "learning_rate": 2e-05,
883
+ "loss": 0.2657,
884
+ "step": 146
885
+ },
886
+ {
887
+ "epoch": 0.9,
888
+ "learning_rate": 2e-05,
889
+ "loss": 0.2815,
890
+ "step": 147
891
+ },
892
+ {
893
+ "epoch": 0.9,
894
+ "learning_rate": 2e-05,
895
+ "loss": 0.2615,
896
+ "step": 148
897
+ },
898
+ {
899
+ "epoch": 0.91,
900
+ "learning_rate": 2e-05,
901
+ "loss": 0.3202,
902
+ "step": 149
903
+ },
904
+ {
905
+ "epoch": 0.91,
906
+ "learning_rate": 2e-05,
907
+ "loss": 0.3335,
908
+ "step": 150
909
+ },
910
+ {
911
+ "epoch": 0.92,
912
+ "learning_rate": 2e-05,
913
+ "loss": 0.3079,
914
+ "step": 151
915
+ },
916
+ {
917
+ "epoch": 0.93,
918
+ "learning_rate": 2e-05,
919
+ "loss": 0.3115,
920
+ "step": 152
921
+ },
922
+ {
923
+ "epoch": 0.93,
924
+ "learning_rate": 2e-05,
925
+ "loss": 0.2924,
926
+ "step": 153
927
+ },
928
+ {
929
+ "epoch": 0.94,
930
+ "learning_rate": 2e-05,
931
+ "loss": 0.2808,
932
+ "step": 154
933
+ },
934
+ {
935
+ "epoch": 0.95,
936
+ "learning_rate": 2e-05,
937
+ "loss": 0.2941,
938
+ "step": 155
939
+ },
940
+ {
941
+ "epoch": 0.95,
942
+ "learning_rate": 2e-05,
943
+ "loss": 0.2778,
944
+ "step": 156
945
+ },
946
+ {
947
+ "epoch": 0.96,
948
+ "learning_rate": 2e-05,
949
+ "loss": 0.2935,
950
+ "step": 157
951
+ },
952
+ {
953
+ "epoch": 0.96,
954
+ "learning_rate": 2e-05,
955
+ "loss": 0.3024,
956
+ "step": 158
957
+ },
958
+ {
959
+ "epoch": 0.97,
960
+ "learning_rate": 2e-05,
961
+ "loss": 0.2821,
962
+ "step": 159
963
+ },
964
+ {
965
+ "epoch": 0.98,
966
+ "learning_rate": 2e-05,
967
+ "loss": 0.3009,
968
+ "step": 160
969
+ },
970
+ {
971
+ "epoch": 0.98,
972
+ "learning_rate": 2e-05,
973
+ "loss": 0.3063,
974
+ "step": 161
975
+ },
976
+ {
977
+ "epoch": 0.99,
978
+ "learning_rate": 2e-05,
979
+ "loss": 0.2719,
980
+ "step": 162
981
+ },
982
+ {
983
+ "epoch": 0.99,
984
+ "learning_rate": 2e-05,
985
+ "loss": 0.2865,
986
+ "step": 163
987
+ },
988
+ {
989
+ "epoch": 1.0,
990
+ "learning_rate": 2e-05,
991
+ "loss": 0.2507,
992
+ "step": 164
993
+ },
994
+ {
995
+ "epoch": 1.01,
996
+ "learning_rate": 2e-05,
997
+ "loss": 0.2312,
998
+ "step": 165
999
+ },
1000
+ {
1001
+ "epoch": 1.01,
1002
+ "learning_rate": 2e-05,
1003
+ "loss": 0.2336,
1004
+ "step": 166
1005
+ },
1006
+ {
1007
+ "epoch": 1.02,
1008
+ "learning_rate": 2e-05,
1009
+ "loss": 0.2188,
1010
+ "step": 167
1011
+ },
1012
+ {
1013
+ "epoch": 1.02,
1014
+ "learning_rate": 2e-05,
1015
+ "loss": 0.1769,
1016
+ "step": 168
1017
+ },
1018
+ {
1019
+ "epoch": 1.03,
1020
+ "learning_rate": 2e-05,
1021
+ "loss": 0.1957,
1022
+ "step": 169
1023
+ },
1024
+ {
1025
+ "epoch": 1.04,
1026
+ "learning_rate": 2e-05,
1027
+ "loss": 0.1978,
1028
+ "step": 170
1029
+ },
1030
+ {
1031
+ "epoch": 1.04,
1032
+ "learning_rate": 2e-05,
1033
+ "loss": 0.2185,
1034
+ "step": 171
1035
+ },
1036
+ {
1037
+ "epoch": 1.05,
1038
+ "learning_rate": 2e-05,
1039
+ "loss": 0.1927,
1040
+ "step": 172
1041
+ },
1042
+ {
1043
+ "epoch": 1.05,
1044
+ "learning_rate": 2e-05,
1045
+ "loss": 0.2238,
1046
+ "step": 173
1047
+ },
1048
+ {
1049
+ "epoch": 1.06,
1050
+ "learning_rate": 2e-05,
1051
+ "loss": 0.2159,
1052
+ "step": 174
1053
+ },
1054
+ {
1055
+ "epoch": 1.07,
1056
+ "learning_rate": 2e-05,
1057
+ "loss": 0.1945,
1058
+ "step": 175
1059
+ },
1060
+ {
1061
+ "epoch": 1.07,
1062
+ "learning_rate": 2e-05,
1063
+ "loss": 0.2045,
1064
+ "step": 176
1065
+ },
1066
+ {
1067
+ "epoch": 1.08,
1068
+ "learning_rate": 2e-05,
1069
+ "loss": 0.198,
1070
+ "step": 177
1071
+ },
1072
+ {
1073
+ "epoch": 1.09,
1074
+ "learning_rate": 2e-05,
1075
+ "loss": 0.2074,
1076
+ "step": 178
1077
+ },
1078
+ {
1079
+ "epoch": 1.09,
1080
+ "learning_rate": 2e-05,
1081
+ "loss": 0.2106,
1082
+ "step": 179
1083
+ },
1084
+ {
1085
+ "epoch": 1.1,
1086
+ "learning_rate": 2e-05,
1087
+ "loss": 0.1984,
1088
+ "step": 180
1089
+ },
1090
+ {
1091
+ "epoch": 1.1,
1092
+ "learning_rate": 2e-05,
1093
+ "loss": 0.2229,
1094
+ "step": 181
1095
+ },
1096
+ {
1097
+ "epoch": 1.11,
1098
+ "learning_rate": 2e-05,
1099
+ "loss": 0.1941,
1100
+ "step": 182
1101
+ },
1102
+ {
1103
+ "epoch": 1.12,
1104
+ "learning_rate": 2e-05,
1105
+ "loss": 0.1978,
1106
+ "step": 183
1107
+ },
1108
+ {
1109
+ "epoch": 1.12,
1110
+ "learning_rate": 2e-05,
1111
+ "loss": 0.1808,
1112
+ "step": 184
1113
+ },
1114
+ {
1115
+ "epoch": 1.13,
1116
+ "learning_rate": 2e-05,
1117
+ "loss": 0.1936,
1118
+ "step": 185
1119
+ },
1120
+ {
1121
+ "epoch": 1.13,
1122
+ "learning_rate": 2e-05,
1123
+ "loss": 0.2327,
1124
+ "step": 186
1125
+ },
1126
+ {
1127
+ "epoch": 1.14,
1128
+ "learning_rate": 2e-05,
1129
+ "loss": 0.1876,
1130
+ "step": 187
1131
+ },
1132
+ {
1133
+ "epoch": 1.15,
1134
+ "learning_rate": 2e-05,
1135
+ "loss": 0.2247,
1136
+ "step": 188
1137
+ },
1138
+ {
1139
+ "epoch": 1.15,
1140
+ "learning_rate": 2e-05,
1141
+ "loss": 0.2155,
1142
+ "step": 189
1143
+ },
1144
+ {
1145
+ "epoch": 1.16,
1146
+ "learning_rate": 2e-05,
1147
+ "loss": 0.1917,
1148
+ "step": 190
1149
+ },
1150
+ {
1151
+ "epoch": 1.16,
1152
+ "learning_rate": 2e-05,
1153
+ "loss": 0.193,
1154
+ "step": 191
1155
+ },
1156
+ {
1157
+ "epoch": 1.17,
1158
+ "learning_rate": 2e-05,
1159
+ "loss": 0.2141,
1160
+ "step": 192
1161
+ },
1162
+ {
1163
+ "epoch": 1.18,
1164
+ "learning_rate": 2e-05,
1165
+ "loss": 0.2195,
1166
+ "step": 193
1167
+ },
1168
+ {
1169
+ "epoch": 1.18,
1170
+ "learning_rate": 2e-05,
1171
+ "loss": 0.1937,
1172
+ "step": 194
1173
+ },
1174
+ {
1175
+ "epoch": 1.19,
1176
+ "learning_rate": 2e-05,
1177
+ "loss": 0.2067,
1178
+ "step": 195
1179
+ },
1180
+ {
1181
+ "epoch": 1.2,
1182
+ "learning_rate": 2e-05,
1183
+ "loss": 0.2426,
1184
+ "step": 196
1185
+ },
1186
+ {
1187
+ "epoch": 1.2,
1188
+ "learning_rate": 2e-05,
1189
+ "loss": 0.2234,
1190
+ "step": 197
1191
+ },
1192
+ {
1193
+ "epoch": 1.21,
1194
+ "learning_rate": 2e-05,
1195
+ "loss": 0.2133,
1196
+ "step": 198
1197
+ },
1198
+ {
1199
+ "epoch": 1.21,
1200
+ "learning_rate": 2e-05,
1201
+ "loss": 0.2021,
1202
+ "step": 199
1203
+ },
1204
+ {
1205
+ "epoch": 1.22,
1206
+ "learning_rate": 2e-05,
1207
+ "loss": 0.2015,
1208
+ "step": 200
1209
+ },
1210
+ {
1211
+ "epoch": 1.23,
1212
+ "learning_rate": 2e-05,
1213
+ "loss": 0.2271,
1214
+ "step": 201
1215
+ },
1216
+ {
1217
+ "epoch": 1.23,
1218
+ "learning_rate": 2e-05,
1219
+ "loss": 0.2024,
1220
+ "step": 202
1221
+ },
1222
+ {
1223
+ "epoch": 1.24,
1224
+ "learning_rate": 2e-05,
1225
+ "loss": 0.2025,
1226
+ "step": 203
1227
+ },
1228
+ {
1229
+ "epoch": 1.24,
1230
+ "learning_rate": 2e-05,
1231
+ "loss": 0.2179,
1232
+ "step": 204
1233
+ },
1234
+ {
1235
+ "epoch": 1.25,
1236
+ "learning_rate": 2e-05,
1237
+ "loss": 0.2128,
1238
+ "step": 205
1239
+ },
1240
+ {
1241
+ "epoch": 1.26,
1242
+ "learning_rate": 2e-05,
1243
+ "loss": 0.1875,
1244
+ "step": 206
1245
+ },
1246
+ {
1247
+ "epoch": 1.26,
1248
+ "learning_rate": 2e-05,
1249
+ "loss": 0.2263,
1250
+ "step": 207
1251
+ },
1252
+ {
1253
+ "epoch": 1.27,
1254
+ "learning_rate": 2e-05,
1255
+ "loss": 0.2032,
1256
+ "step": 208
1257
+ },
1258
+ {
1259
+ "epoch": 1.27,
1260
+ "learning_rate": 2e-05,
1261
+ "loss": 0.203,
1262
+ "step": 209
1263
+ },
1264
+ {
1265
+ "epoch": 1.28,
1266
+ "learning_rate": 2e-05,
1267
+ "loss": 0.2262,
1268
+ "step": 210
1269
+ },
1270
+ {
1271
+ "epoch": 1.29,
1272
+ "learning_rate": 2e-05,
1273
+ "loss": 0.2151,
1274
+ "step": 211
1275
+ },
1276
+ {
1277
+ "epoch": 1.29,
1278
+ "learning_rate": 2e-05,
1279
+ "loss": 0.2174,
1280
+ "step": 212
1281
+ },
1282
+ {
1283
+ "epoch": 1.3,
1284
+ "learning_rate": 2e-05,
1285
+ "loss": 0.1843,
1286
+ "step": 213
1287
+ },
1288
+ {
1289
+ "epoch": 1.3,
1290
+ "learning_rate": 2e-05,
1291
+ "loss": 0.1965,
1292
+ "step": 214
1293
+ },
1294
+ {
1295
+ "epoch": 1.31,
1296
+ "learning_rate": 2e-05,
1297
+ "loss": 0.2238,
1298
+ "step": 215
1299
+ },
1300
+ {
1301
+ "epoch": 1.32,
1302
+ "learning_rate": 2e-05,
1303
+ "loss": 0.1907,
1304
+ "step": 216
1305
+ },
1306
+ {
1307
+ "epoch": 1.32,
1308
+ "learning_rate": 2e-05,
1309
+ "loss": 0.2238,
1310
+ "step": 217
1311
+ },
1312
+ {
1313
+ "epoch": 1.33,
1314
+ "learning_rate": 2e-05,
1315
+ "loss": 0.1917,
1316
+ "step": 218
1317
+ },
1318
+ {
1319
+ "epoch": 1.34,
1320
+ "learning_rate": 2e-05,
1321
+ "loss": 0.2024,
1322
+ "step": 219
1323
+ },
1324
+ {
1325
+ "epoch": 1.34,
1326
+ "learning_rate": 2e-05,
1327
+ "loss": 0.2097,
1328
+ "step": 220
1329
+ },
1330
+ {
1331
+ "epoch": 1.35,
1332
+ "learning_rate": 2e-05,
1333
+ "loss": 0.2217,
1334
+ "step": 221
1335
+ },
1336
+ {
1337
+ "epoch": 1.35,
1338
+ "learning_rate": 2e-05,
1339
+ "loss": 0.2335,
1340
+ "step": 222
1341
+ },
1342
+ {
1343
+ "epoch": 1.36,
1344
+ "learning_rate": 2e-05,
1345
+ "loss": 0.2006,
1346
+ "step": 223
1347
+ },
1348
+ {
1349
+ "epoch": 1.37,
1350
+ "learning_rate": 2e-05,
1351
+ "loss": 0.215,
1352
+ "step": 224
1353
+ },
1354
+ {
1355
+ "epoch": 1.37,
1356
+ "learning_rate": 2e-05,
1357
+ "loss": 0.2419,
1358
+ "step": 225
1359
+ },
1360
+ {
1361
+ "epoch": 1.38,
1362
+ "learning_rate": 2e-05,
1363
+ "loss": 0.2238,
1364
+ "step": 226
1365
+ },
1366
+ {
1367
+ "epoch": 1.38,
1368
+ "learning_rate": 2e-05,
1369
+ "loss": 0.2188,
1370
+ "step": 227
1371
+ },
1372
+ {
1373
+ "epoch": 1.39,
1374
+ "learning_rate": 2e-05,
1375
+ "loss": 0.1932,
1376
+ "step": 228
1377
+ },
1378
+ {
1379
+ "epoch": 1.4,
1380
+ "learning_rate": 2e-05,
1381
+ "loss": 0.2273,
1382
+ "step": 229
1383
+ },
1384
+ {
1385
+ "epoch": 1.4,
1386
+ "learning_rate": 2e-05,
1387
+ "loss": 0.2068,
1388
+ "step": 230
1389
+ },
1390
+ {
1391
+ "epoch": 1.41,
1392
+ "learning_rate": 2e-05,
1393
+ "loss": 0.1949,
1394
+ "step": 231
1395
+ },
1396
+ {
1397
+ "epoch": 1.41,
1398
+ "learning_rate": 2e-05,
1399
+ "loss": 0.2098,
1400
+ "step": 232
1401
+ },
1402
+ {
1403
+ "epoch": 1.42,
1404
+ "learning_rate": 2e-05,
1405
+ "loss": 0.2012,
1406
+ "step": 233
1407
+ },
1408
+ {
1409
+ "epoch": 1.43,
1410
+ "learning_rate": 2e-05,
1411
+ "loss": 0.2117,
1412
+ "step": 234
1413
+ },
1414
+ {
1415
+ "epoch": 1.43,
1416
+ "learning_rate": 2e-05,
1417
+ "loss": 0.2446,
1418
+ "step": 235
1419
+ },
1420
+ {
1421
+ "epoch": 1.44,
1422
+ "learning_rate": 2e-05,
1423
+ "loss": 0.2042,
1424
+ "step": 236
1425
+ },
1426
+ {
1427
+ "epoch": 1.45,
1428
+ "learning_rate": 2e-05,
1429
+ "loss": 0.1885,
1430
+ "step": 237
1431
+ },
1432
+ {
1433
+ "epoch": 1.45,
1434
+ "learning_rate": 2e-05,
1435
+ "loss": 0.2098,
1436
+ "step": 238
1437
+ },
1438
+ {
1439
+ "epoch": 1.46,
1440
+ "learning_rate": 2e-05,
1441
+ "loss": 0.2004,
1442
+ "step": 239
1443
+ },
1444
+ {
1445
+ "epoch": 1.46,
1446
+ "learning_rate": 2e-05,
1447
+ "loss": 0.2108,
1448
+ "step": 240
1449
+ },
1450
+ {
1451
+ "epoch": 1.47,
1452
+ "learning_rate": 2e-05,
1453
+ "loss": 0.1917,
1454
+ "step": 241
1455
+ },
1456
+ {
1457
+ "epoch": 1.48,
1458
+ "learning_rate": 2e-05,
1459
+ "loss": 0.2324,
1460
+ "step": 242
1461
+ },
1462
+ {
1463
+ "epoch": 1.48,
1464
+ "learning_rate": 2e-05,
1465
+ "loss": 0.2211,
1466
+ "step": 243
1467
+ },
1468
+ {
1469
+ "epoch": 1.49,
1470
+ "learning_rate": 2e-05,
1471
+ "loss": 0.2102,
1472
+ "step": 244
1473
+ },
1474
+ {
1475
+ "epoch": 1.49,
1476
+ "learning_rate": 2e-05,
1477
+ "loss": 0.2356,
1478
+ "step": 245
1479
+ },
1480
+ {
1481
+ "epoch": 1.5,
1482
+ "learning_rate": 2e-05,
1483
+ "loss": 0.2201,
1484
+ "step": 246
1485
+ },
1486
+ {
1487
+ "epoch": 1.51,
1488
+ "learning_rate": 2e-05,
1489
+ "loss": 0.2198,
1490
+ "step": 247
1491
+ },
1492
+ {
1493
+ "epoch": 1.51,
1494
+ "learning_rate": 2e-05,
1495
+ "loss": 0.2089,
1496
+ "step": 248
1497
+ },
1498
+ {
1499
+ "epoch": 1.52,
1500
+ "learning_rate": 2e-05,
1501
+ "loss": 0.2169,
1502
+ "step": 249
1503
+ },
1504
+ {
1505
+ "epoch": 1.52,
1506
+ "learning_rate": 2e-05,
1507
+ "loss": 0.2181,
1508
+ "step": 250
1509
+ },
1510
+ {
1511
+ "epoch": 1.53,
1512
+ "learning_rate": 2e-05,
1513
+ "loss": 0.2356,
1514
+ "step": 251
1515
+ },
1516
+ {
1517
+ "epoch": 1.54,
1518
+ "learning_rate": 2e-05,
1519
+ "loss": 0.1996,
1520
+ "step": 252
1521
+ },
1522
+ {
1523
+ "epoch": 1.54,
1524
+ "learning_rate": 2e-05,
1525
+ "loss": 0.2262,
1526
+ "step": 253
1527
+ },
1528
+ {
1529
+ "epoch": 1.55,
1530
+ "learning_rate": 2e-05,
1531
+ "loss": 0.2146,
1532
+ "step": 254
1533
+ },
1534
+ {
1535
+ "epoch": 1.55,
1536
+ "learning_rate": 2e-05,
1537
+ "loss": 0.2051,
1538
+ "step": 255
1539
+ },
1540
+ {
1541
+ "epoch": 1.56,
1542
+ "learning_rate": 2e-05,
1543
+ "loss": 0.2008,
1544
+ "step": 256
1545
+ },
1546
+ {
1547
+ "epoch": 1.57,
1548
+ "learning_rate": 2e-05,
1549
+ "loss": 0.222,
1550
+ "step": 257
1551
+ },
1552
+ {
1553
+ "epoch": 1.57,
1554
+ "learning_rate": 2e-05,
1555
+ "loss": 0.2349,
1556
+ "step": 258
1557
+ },
1558
+ {
1559
+ "epoch": 1.58,
1560
+ "learning_rate": 2e-05,
1561
+ "loss": 0.217,
1562
+ "step": 259
1563
+ },
1564
+ {
1565
+ "epoch": 1.59,
1566
+ "learning_rate": 2e-05,
1567
+ "loss": 0.223,
1568
+ "step": 260
1569
+ },
1570
+ {
1571
+ "epoch": 1.59,
1572
+ "learning_rate": 2e-05,
1573
+ "loss": 0.2213,
1574
+ "step": 261
1575
+ },
1576
+ {
1577
+ "epoch": 1.6,
1578
+ "learning_rate": 2e-05,
1579
+ "loss": 0.2327,
1580
+ "step": 262
1581
+ },
1582
+ {
1583
+ "epoch": 1.6,
1584
+ "learning_rate": 2e-05,
1585
+ "loss": 0.2203,
1586
+ "step": 263
1587
+ },
1588
+ {
1589
+ "epoch": 1.61,
1590
+ "learning_rate": 2e-05,
1591
+ "loss": 0.2134,
1592
+ "step": 264
1593
+ },
1594
+ {
1595
+ "epoch": 1.62,
1596
+ "learning_rate": 2e-05,
1597
+ "loss": 0.2103,
1598
+ "step": 265
1599
+ },
1600
+ {
1601
+ "epoch": 1.62,
1602
+ "learning_rate": 2e-05,
1603
+ "loss": 0.2181,
1604
+ "step": 266
1605
+ },
1606
+ {
1607
+ "epoch": 1.63,
1608
+ "learning_rate": 2e-05,
1609
+ "loss": 0.2207,
1610
+ "step": 267
1611
+ },
1612
+ {
1613
+ "epoch": 1.63,
1614
+ "learning_rate": 2e-05,
1615
+ "loss": 0.2064,
1616
+ "step": 268
1617
+ },
1618
+ {
1619
+ "epoch": 1.64,
1620
+ "learning_rate": 2e-05,
1621
+ "loss": 0.2107,
1622
+ "step": 269
1623
+ },
1624
+ {
1625
+ "epoch": 1.65,
1626
+ "learning_rate": 2e-05,
1627
+ "loss": 0.2234,
1628
+ "step": 270
1629
+ },
1630
+ {
1631
+ "epoch": 1.65,
1632
+ "learning_rate": 2e-05,
1633
+ "loss": 0.2382,
1634
+ "step": 271
1635
+ },
1636
+ {
1637
+ "epoch": 1.66,
1638
+ "learning_rate": 2e-05,
1639
+ "loss": 0.1884,
1640
+ "step": 272
1641
+ },
1642
+ {
1643
+ "epoch": 1.66,
1644
+ "learning_rate": 2e-05,
1645
+ "loss": 0.2007,
1646
+ "step": 273
1647
+ },
1648
+ {
1649
+ "epoch": 1.67,
1650
+ "learning_rate": 2e-05,
1651
+ "loss": 0.2222,
1652
+ "step": 274
1653
+ },
1654
+ {
1655
+ "epoch": 1.68,
1656
+ "learning_rate": 2e-05,
1657
+ "loss": 0.2294,
1658
+ "step": 275
1659
+ },
1660
+ {
1661
+ "epoch": 1.68,
1662
+ "learning_rate": 2e-05,
1663
+ "loss": 0.2177,
1664
+ "step": 276
1665
+ },
1666
+ {
1667
+ "epoch": 1.69,
1668
+ "learning_rate": 2e-05,
1669
+ "loss": 0.2065,
1670
+ "step": 277
1671
+ },
1672
+ {
1673
+ "epoch": 1.7,
1674
+ "learning_rate": 2e-05,
1675
+ "loss": 0.2003,
1676
+ "step": 278
1677
+ },
1678
+ {
1679
+ "epoch": 1.7,
1680
+ "learning_rate": 2e-05,
1681
+ "loss": 0.1974,
1682
+ "step": 279
1683
+ },
1684
+ {
1685
+ "epoch": 1.71,
1686
+ "learning_rate": 2e-05,
1687
+ "loss": 0.2146,
1688
+ "step": 280
1689
+ },
1690
+ {
1691
+ "epoch": 1.71,
1692
+ "learning_rate": 2e-05,
1693
+ "loss": 0.2466,
1694
+ "step": 281
1695
+ },
1696
+ {
1697
+ "epoch": 1.72,
1698
+ "learning_rate": 2e-05,
1699
+ "loss": 0.2023,
1700
+ "step": 282
1701
+ },
1702
+ {
1703
+ "epoch": 1.73,
1704
+ "learning_rate": 2e-05,
1705
+ "loss": 0.1937,
1706
+ "step": 283
1707
+ },
1708
+ {
1709
+ "epoch": 1.73,
1710
+ "learning_rate": 2e-05,
1711
+ "loss": 0.2134,
1712
+ "step": 284
1713
+ },
1714
+ {
1715
+ "epoch": 1.74,
1716
+ "learning_rate": 2e-05,
1717
+ "loss": 0.2152,
1718
+ "step": 285
1719
+ },
1720
+ {
1721
+ "epoch": 1.74,
1722
+ "learning_rate": 2e-05,
1723
+ "loss": 0.2357,
1724
+ "step": 286
1725
+ },
1726
+ {
1727
+ "epoch": 1.75,
1728
+ "learning_rate": 2e-05,
1729
+ "loss": 0.2061,
1730
+ "step": 287
1731
+ },
1732
+ {
1733
+ "epoch": 1.76,
1734
+ "learning_rate": 2e-05,
1735
+ "loss": 0.2004,
1736
+ "step": 288
1737
+ },
1738
+ {
1739
+ "epoch": 1.76,
1740
+ "learning_rate": 2e-05,
1741
+ "loss": 0.1984,
1742
+ "step": 289
1743
+ },
1744
+ {
1745
+ "epoch": 1.77,
1746
+ "learning_rate": 2e-05,
1747
+ "loss": 0.2134,
1748
+ "step": 290
1749
+ },
1750
+ {
1751
+ "epoch": 1.77,
1752
+ "learning_rate": 2e-05,
1753
+ "loss": 0.2006,
1754
+ "step": 291
1755
+ },
1756
+ {
1757
+ "epoch": 1.78,
1758
+ "learning_rate": 2e-05,
1759
+ "loss": 0.2203,
1760
+ "step": 292
1761
+ },
1762
+ {
1763
+ "epoch": 1.79,
1764
+ "learning_rate": 2e-05,
1765
+ "loss": 0.2067,
1766
+ "step": 293
1767
+ },
1768
+ {
1769
+ "epoch": 1.79,
1770
+ "learning_rate": 2e-05,
1771
+ "loss": 0.2218,
1772
+ "step": 294
1773
+ },
1774
+ {
1775
+ "epoch": 1.8,
1776
+ "learning_rate": 2e-05,
1777
+ "loss": 0.2255,
1778
+ "step": 295
1779
+ },
1780
+ {
1781
+ "epoch": 1.8,
1782
+ "learning_rate": 2e-05,
1783
+ "loss": 0.2156,
1784
+ "step": 296
1785
+ },
1786
+ {
1787
+ "epoch": 1.81,
1788
+ "learning_rate": 2e-05,
1789
+ "loss": 0.2092,
1790
+ "step": 297
1791
+ },
1792
+ {
1793
+ "epoch": 1.82,
1794
+ "learning_rate": 2e-05,
1795
+ "loss": 0.2135,
1796
+ "step": 298
1797
+ },
1798
+ {
1799
+ "epoch": 1.82,
1800
+ "learning_rate": 2e-05,
1801
+ "loss": 0.2123,
1802
+ "step": 299
1803
+ },
1804
+ {
1805
+ "epoch": 1.83,
1806
+ "learning_rate": 2e-05,
1807
+ "loss": 0.2131,
1808
+ "step": 300
1809
+ },
1810
+ {
1811
+ "epoch": 1.84,
1812
+ "learning_rate": 2e-05,
1813
+ "loss": 0.1343,
1814
+ "step": 301
1815
+ },
1816
+ {
1817
+ "epoch": 1.84,
1818
+ "learning_rate": 2e-05,
1819
+ "loss": 0.1756,
1820
+ "step": 302
1821
+ },
1822
+ {
1823
+ "epoch": 1.85,
1824
+ "learning_rate": 2e-05,
1825
+ "loss": 0.1584,
1826
+ "step": 303
1827
+ },
1828
+ {
1829
+ "epoch": 1.85,
1830
+ "learning_rate": 2e-05,
1831
+ "loss": 0.17,
1832
+ "step": 304
1833
+ },
1834
+ {
1835
+ "epoch": 1.86,
1836
+ "learning_rate": 2e-05,
1837
+ "loss": 0.1644,
1838
+ "step": 305
1839
+ },
1840
+ {
1841
+ "epoch": 1.87,
1842
+ "learning_rate": 2e-05,
1843
+ "loss": 0.1394,
1844
+ "step": 306
1845
+ },
1846
+ {
1847
+ "epoch": 1.87,
1848
+ "learning_rate": 2e-05,
1849
+ "loss": 0.1244,
1850
+ "step": 307
1851
+ },
1852
+ {
1853
+ "epoch": 1.88,
1854
+ "learning_rate": 2e-05,
1855
+ "loss": 0.1575,
1856
+ "step": 308
1857
+ },
1858
+ {
1859
+ "epoch": 1.88,
1860
+ "learning_rate": 2e-05,
1861
+ "loss": 0.139,
1862
+ "step": 309
1863
+ },
1864
+ {
1865
+ "epoch": 1.89,
1866
+ "learning_rate": 2e-05,
1867
+ "loss": 0.1434,
1868
+ "step": 310
1869
+ },
1870
+ {
1871
+ "epoch": 1.9,
1872
+ "learning_rate": 2e-05,
1873
+ "loss": 0.1583,
1874
+ "step": 311
1875
+ },
1876
+ {
1877
+ "epoch": 1.9,
1878
+ "learning_rate": 2e-05,
1879
+ "loss": 0.1606,
1880
+ "step": 312
1881
+ },
1882
+ {
1883
+ "epoch": 1.91,
1884
+ "learning_rate": 2e-05,
1885
+ "loss": 0.1411,
1886
+ "step": 313
1887
+ },
1888
+ {
1889
+ "epoch": 1.91,
1890
+ "learning_rate": 2e-05,
1891
+ "loss": 0.1259,
1892
+ "step": 314
1893
+ },
1894
+ {
1895
+ "epoch": 1.92,
1896
+ "learning_rate": 2e-05,
1897
+ "loss": 0.1534,
1898
+ "step": 315
1899
+ },
1900
+ {
1901
+ "epoch": 1.93,
1902
+ "learning_rate": 2e-05,
1903
+ "loss": 0.129,
1904
+ "step": 316
1905
+ },
1906
+ {
1907
+ "epoch": 1.93,
1908
+ "learning_rate": 2e-05,
1909
+ "loss": 0.152,
1910
+ "step": 317
1911
+ },
1912
+ {
1913
+ "epoch": 1.94,
1914
+ "learning_rate": 2e-05,
1915
+ "loss": 0.1489,
1916
+ "step": 318
1917
+ },
1918
+ {
1919
+ "epoch": 1.95,
1920
+ "learning_rate": 2e-05,
1921
+ "loss": 0.1431,
1922
+ "step": 319
1923
+ },
1924
+ {
1925
+ "epoch": 1.95,
1926
+ "learning_rate": 2e-05,
1927
+ "loss": 0.1387,
1928
+ "step": 320
1929
+ },
1930
+ {
1931
+ "epoch": 1.96,
1932
+ "learning_rate": 2e-05,
1933
+ "loss": 0.1499,
1934
+ "step": 321
1935
+ },
1936
+ {
1937
+ "epoch": 1.96,
1938
+ "learning_rate": 2e-05,
1939
+ "loss": 0.1519,
1940
+ "step": 322
1941
+ },
1942
+ {
1943
+ "epoch": 1.97,
1944
+ "learning_rate": 2e-05,
1945
+ "loss": 0.1506,
1946
+ "step": 323
1947
+ },
1948
+ {
1949
+ "epoch": 1.98,
1950
+ "learning_rate": 2e-05,
1951
+ "loss": 0.1567,
1952
+ "step": 324
1953
+ },
1954
+ {
1955
+ "epoch": 1.98,
1956
+ "learning_rate": 2e-05,
1957
+ "loss": 0.141,
1958
+ "step": 325
1959
+ },
1960
+ {
1961
+ "epoch": 1.99,
1962
+ "learning_rate": 2e-05,
1963
+ "loss": 0.1713,
1964
+ "step": 326
1965
+ },
1966
+ {
1967
+ "epoch": 1.99,
1968
+ "learning_rate": 2e-05,
1969
+ "loss": 0.1493,
1970
+ "step": 327
1971
+ },
1972
+ {
1973
+ "epoch": 2.0,
1974
+ "learning_rate": 2e-05,
1975
+ "loss": 0.1281,
1976
+ "step": 328
1977
+ },
1978
+ {
1979
+ "epoch": 2.01,
1980
+ "learning_rate": 2e-05,
1981
+ "loss": 0.1395,
1982
+ "step": 329
1983
+ },
1984
+ {
1985
+ "epoch": 2.01,
1986
+ "learning_rate": 2e-05,
1987
+ "loss": 0.1452,
1988
+ "step": 330
1989
+ },
1990
+ {
1991
+ "epoch": 2.02,
1992
+ "learning_rate": 2e-05,
1993
+ "loss": 0.1342,
1994
+ "step": 331
1995
+ },
1996
+ {
1997
+ "epoch": 2.02,
1998
+ "learning_rate": 2e-05,
1999
+ "loss": 0.1445,
2000
+ "step": 332
2001
+ },
2002
+ {
2003
+ "epoch": 2.03,
2004
+ "learning_rate": 2e-05,
2005
+ "loss": 0.1173,
2006
+ "step": 333
2007
+ },
2008
+ {
2009
+ "epoch": 2.04,
2010
+ "learning_rate": 2e-05,
2011
+ "loss": 0.1302,
2012
+ "step": 334
2013
+ },
2014
+ {
2015
+ "epoch": 2.04,
2016
+ "learning_rate": 2e-05,
2017
+ "loss": 0.125,
2018
+ "step": 335
2019
+ },
2020
+ {
2021
+ "epoch": 2.05,
2022
+ "learning_rate": 2e-05,
2023
+ "loss": 0.1432,
2024
+ "step": 336
2025
+ },
2026
+ {
2027
+ "epoch": 2.05,
2028
+ "learning_rate": 2e-05,
2029
+ "loss": 0.1283,
2030
+ "step": 337
2031
+ },
2032
+ {
2033
+ "epoch": 2.06,
2034
+ "learning_rate": 2e-05,
2035
+ "loss": 0.1395,
2036
+ "step": 338
2037
+ },
2038
+ {
2039
+ "epoch": 2.07,
2040
+ "learning_rate": 2e-05,
2041
+ "loss": 0.1331,
2042
+ "step": 339
2043
+ },
2044
+ {
2045
+ "epoch": 2.07,
2046
+ "learning_rate": 2e-05,
2047
+ "loss": 0.1505,
2048
+ "step": 340
2049
+ },
2050
+ {
2051
+ "epoch": 2.08,
2052
+ "learning_rate": 2e-05,
2053
+ "loss": 0.1298,
2054
+ "step": 341
2055
+ },
2056
+ {
2057
+ "epoch": 2.09,
2058
+ "learning_rate": 2e-05,
2059
+ "loss": 0.1499,
2060
+ "step": 342
2061
+ },
2062
+ {
2063
+ "epoch": 2.09,
2064
+ "learning_rate": 2e-05,
2065
+ "loss": 0.151,
2066
+ "step": 343
2067
+ },
2068
+ {
2069
+ "epoch": 2.1,
2070
+ "learning_rate": 2e-05,
2071
+ "loss": 0.1628,
2072
+ "step": 344
2073
+ },
2074
+ {
2075
+ "epoch": 2.1,
2076
+ "learning_rate": 2e-05,
2077
+ "loss": 0.1378,
2078
+ "step": 345
2079
+ },
2080
+ {
2081
+ "epoch": 2.11,
2082
+ "learning_rate": 2e-05,
2083
+ "loss": 0.1484,
2084
+ "step": 346
2085
+ },
2086
+ {
2087
+ "epoch": 2.12,
2088
+ "learning_rate": 2e-05,
2089
+ "loss": 0.1513,
2090
+ "step": 347
2091
+ },
2092
+ {
2093
+ "epoch": 2.12,
2094
+ "learning_rate": 2e-05,
2095
+ "loss": 0.1323,
2096
+ "step": 348
2097
+ },
2098
+ {
2099
+ "epoch": 2.13,
2100
+ "learning_rate": 2e-05,
2101
+ "loss": 0.1422,
2102
+ "step": 349
2103
+ },
2104
+ {
2105
+ "epoch": 2.13,
2106
+ "learning_rate": 2e-05,
2107
+ "loss": 0.1706,
2108
+ "step": 350
2109
+ },
2110
+ {
2111
+ "epoch": 2.14,
2112
+ "learning_rate": 2e-05,
2113
+ "loss": 0.1591,
2114
+ "step": 351
2115
+ },
2116
+ {
2117
+ "epoch": 2.15,
2118
+ "learning_rate": 2e-05,
2119
+ "loss": 0.1483,
2120
+ "step": 352
2121
+ },
2122
+ {
2123
+ "epoch": 2.15,
2124
+ "learning_rate": 2e-05,
2125
+ "loss": 0.1484,
2126
+ "step": 353
2127
+ },
2128
+ {
2129
+ "epoch": 2.16,
2130
+ "learning_rate": 2e-05,
2131
+ "loss": 0.1393,
2132
+ "step": 354
2133
+ },
2134
+ {
2135
+ "epoch": 2.16,
2136
+ "learning_rate": 2e-05,
2137
+ "loss": 0.1543,
2138
+ "step": 355
2139
+ },
2140
+ {
2141
+ "epoch": 2.17,
2142
+ "learning_rate": 2e-05,
2143
+ "loss": 0.142,
2144
+ "step": 356
2145
+ },
2146
+ {
2147
+ "epoch": 2.18,
2148
+ "learning_rate": 2e-05,
2149
+ "loss": 0.1474,
2150
+ "step": 357
2151
+ },
2152
+ {
2153
+ "epoch": 2.18,
2154
+ "learning_rate": 2e-05,
2155
+ "loss": 0.1469,
2156
+ "step": 358
2157
+ },
2158
+ {
2159
+ "epoch": 2.19,
2160
+ "learning_rate": 2e-05,
2161
+ "loss": 0.1516,
2162
+ "step": 359
2163
+ },
2164
+ {
2165
+ "epoch": 2.2,
2166
+ "learning_rate": 2e-05,
2167
+ "loss": 0.1666,
2168
+ "step": 360
2169
+ },
2170
+ {
2171
+ "epoch": 2.2,
2172
+ "learning_rate": 2e-05,
2173
+ "loss": 0.1473,
2174
+ "step": 361
2175
+ },
2176
+ {
2177
+ "epoch": 2.21,
2178
+ "learning_rate": 2e-05,
2179
+ "loss": 0.1707,
2180
+ "step": 362
2181
+ },
2182
+ {
2183
+ "epoch": 2.21,
2184
+ "learning_rate": 2e-05,
2185
+ "loss": 0.1487,
2186
+ "step": 363
2187
+ },
2188
+ {
2189
+ "epoch": 2.22,
2190
+ "learning_rate": 2e-05,
2191
+ "loss": 0.1558,
2192
+ "step": 364
2193
+ },
2194
+ {
2195
+ "epoch": 2.23,
2196
+ "learning_rate": 2e-05,
2197
+ "loss": 0.1622,
2198
+ "step": 365
2199
+ },
2200
+ {
2201
+ "epoch": 2.23,
2202
+ "learning_rate": 2e-05,
2203
+ "loss": 0.1339,
2204
+ "step": 366
2205
+ },
2206
+ {
2207
+ "epoch": 2.24,
2208
+ "learning_rate": 2e-05,
2209
+ "loss": 0.1516,
2210
+ "step": 367
2211
+ },
2212
+ {
2213
+ "epoch": 2.24,
2214
+ "learning_rate": 2e-05,
2215
+ "loss": 0.1515,
2216
+ "step": 368
2217
+ },
2218
+ {
2219
+ "epoch": 2.25,
2220
+ "learning_rate": 2e-05,
2221
+ "loss": 0.1341,
2222
+ "step": 369
2223
+ },
2224
+ {
2225
+ "epoch": 2.26,
2226
+ "learning_rate": 2e-05,
2227
+ "loss": 0.1589,
2228
+ "step": 370
2229
+ },
2230
+ {
2231
+ "epoch": 2.26,
2232
+ "learning_rate": 2e-05,
2233
+ "loss": 0.1413,
2234
+ "step": 371
2235
+ },
2236
+ {
2237
+ "epoch": 2.27,
2238
+ "learning_rate": 2e-05,
2239
+ "loss": 0.1544,
2240
+ "step": 372
2241
+ },
2242
+ {
2243
+ "epoch": 2.27,
2244
+ "learning_rate": 2e-05,
2245
+ "loss": 0.1476,
2246
+ "step": 373
2247
+ },
2248
+ {
2249
+ "epoch": 2.28,
2250
+ "learning_rate": 2e-05,
2251
+ "loss": 0.1409,
2252
+ "step": 374
2253
+ },
2254
+ {
2255
+ "epoch": 2.29,
2256
+ "learning_rate": 2e-05,
2257
+ "loss": 0.1395,
2258
+ "step": 375
2259
+ },
2260
+ {
2261
+ "epoch": 2.29,
2262
+ "learning_rate": 2e-05,
2263
+ "loss": 0.1431,
2264
+ "step": 376
2265
+ },
2266
+ {
2267
+ "epoch": 2.3,
2268
+ "learning_rate": 2e-05,
2269
+ "loss": 0.1431,
2270
+ "step": 377
2271
+ },
2272
+ {
2273
+ "epoch": 2.3,
2274
+ "learning_rate": 2e-05,
2275
+ "loss": 0.1384,
2276
+ "step": 378
2277
+ },
2278
+ {
2279
+ "epoch": 2.31,
2280
+ "learning_rate": 2e-05,
2281
+ "loss": 0.166,
2282
+ "step": 379
2283
+ },
2284
+ {
2285
+ "epoch": 2.32,
2286
+ "learning_rate": 2e-05,
2287
+ "loss": 0.1419,
2288
+ "step": 380
2289
+ },
2290
+ {
2291
+ "epoch": 2.32,
2292
+ "learning_rate": 2e-05,
2293
+ "loss": 0.166,
2294
+ "step": 381
2295
+ },
2296
+ {
2297
+ "epoch": 2.33,
2298
+ "learning_rate": 2e-05,
2299
+ "loss": 0.1641,
2300
+ "step": 382
2301
+ },
2302
+ {
2303
+ "epoch": 2.34,
2304
+ "learning_rate": 2e-05,
2305
+ "loss": 0.1548,
2306
+ "step": 383
2307
+ },
2308
+ {
2309
+ "epoch": 2.34,
2310
+ "learning_rate": 2e-05,
2311
+ "loss": 0.1518,
2312
+ "step": 384
2313
+ },
2314
+ {
2315
+ "epoch": 2.35,
2316
+ "learning_rate": 2e-05,
2317
+ "loss": 0.1584,
2318
+ "step": 385
2319
+ },
2320
+ {
2321
+ "epoch": 2.35,
2322
+ "learning_rate": 2e-05,
2323
+ "loss": 0.1664,
2324
+ "step": 386
2325
+ },
2326
+ {
2327
+ "epoch": 2.36,
2328
+ "learning_rate": 2e-05,
2329
+ "loss": 0.1805,
2330
+ "step": 387
2331
+ },
2332
+ {
2333
+ "epoch": 2.37,
2334
+ "learning_rate": 2e-05,
2335
+ "loss": 0.1447,
2336
+ "step": 388
2337
+ },
2338
+ {
2339
+ "epoch": 2.37,
2340
+ "learning_rate": 2e-05,
2341
+ "loss": 0.1539,
2342
+ "step": 389
2343
+ },
2344
+ {
2345
+ "epoch": 2.38,
2346
+ "learning_rate": 2e-05,
2347
+ "loss": 0.1656,
2348
+ "step": 390
2349
+ },
2350
+ {
2351
+ "epoch": 2.38,
2352
+ "learning_rate": 2e-05,
2353
+ "loss": 0.1654,
2354
+ "step": 391
2355
+ },
2356
+ {
2357
+ "epoch": 2.39,
2358
+ "learning_rate": 2e-05,
2359
+ "loss": 0.16,
2360
+ "step": 392
2361
+ },
2362
+ {
2363
+ "epoch": 2.4,
2364
+ "learning_rate": 2e-05,
2365
+ "loss": 0.1528,
2366
+ "step": 393
2367
+ },
2368
+ {
2369
+ "epoch": 2.4,
2370
+ "learning_rate": 2e-05,
2371
+ "loss": 0.1653,
2372
+ "step": 394
2373
+ },
2374
+ {
2375
+ "epoch": 2.41,
2376
+ "learning_rate": 2e-05,
2377
+ "loss": 0.1619,
2378
+ "step": 395
2379
+ },
2380
+ {
2381
+ "epoch": 2.41,
2382
+ "learning_rate": 2e-05,
2383
+ "loss": 0.1544,
2384
+ "step": 396
2385
+ },
2386
+ {
2387
+ "epoch": 2.42,
2388
+ "learning_rate": 2e-05,
2389
+ "loss": 0.1675,
2390
+ "step": 397
2391
+ },
2392
+ {
2393
+ "epoch": 2.43,
2394
+ "learning_rate": 2e-05,
2395
+ "loss": 0.1733,
2396
+ "step": 398
2397
+ },
2398
+ {
2399
+ "epoch": 2.43,
2400
+ "learning_rate": 2e-05,
2401
+ "loss": 0.1523,
2402
+ "step": 399
2403
+ },
2404
+ {
2405
+ "epoch": 2.44,
2406
+ "learning_rate": 2e-05,
2407
+ "loss": 0.1631,
2408
+ "step": 400
2409
+ },
2410
+ {
2411
+ "epoch": 2.45,
2412
+ "learning_rate": 2e-05,
2413
+ "loss": 0.168,
2414
+ "step": 401
2415
+ },
2416
+ {
2417
+ "epoch": 2.45,
2418
+ "learning_rate": 2e-05,
2419
+ "loss": 0.1574,
2420
+ "step": 402
2421
+ },
2422
+ {
2423
+ "epoch": 2.46,
2424
+ "learning_rate": 2e-05,
2425
+ "loss": 0.1512,
2426
+ "step": 403
2427
+ },
2428
+ {
2429
+ "epoch": 2.46,
2430
+ "learning_rate": 2e-05,
2431
+ "loss": 0.1578,
2432
+ "step": 404
2433
+ },
2434
+ {
2435
+ "epoch": 2.47,
2436
+ "learning_rate": 2e-05,
2437
+ "loss": 0.1411,
2438
+ "step": 405
2439
+ },
2440
+ {
2441
+ "epoch": 2.48,
2442
+ "learning_rate": 2e-05,
2443
+ "loss": 0.1568,
2444
+ "step": 406
2445
+ },
2446
+ {
2447
+ "epoch": 2.48,
2448
+ "learning_rate": 2e-05,
2449
+ "loss": 0.1852,
2450
+ "step": 407
2451
+ },
2452
+ {
2453
+ "epoch": 2.49,
2454
+ "learning_rate": 2e-05,
2455
+ "loss": 0.124,
2456
+ "step": 408
2457
+ },
2458
+ {
2459
+ "epoch": 2.49,
2460
+ "learning_rate": 2e-05,
2461
+ "loss": 0.1692,
2462
+ "step": 409
2463
+ },
2464
+ {
2465
+ "epoch": 2.5,
2466
+ "learning_rate": 2e-05,
2467
+ "loss": 0.1602,
2468
+ "step": 410
2469
+ },
2470
+ {
2471
+ "epoch": 2.51,
2472
+ "learning_rate": 2e-05,
2473
+ "loss": 0.1721,
2474
+ "step": 411
2475
+ },
2476
+ {
2477
+ "epoch": 2.51,
2478
+ "learning_rate": 2e-05,
2479
+ "loss": 0.1541,
2480
+ "step": 412
2481
+ },
2482
+ {
2483
+ "epoch": 2.52,
2484
+ "learning_rate": 2e-05,
2485
+ "loss": 0.1462,
2486
+ "step": 413
2487
+ },
2488
+ {
2489
+ "epoch": 2.52,
2490
+ "learning_rate": 2e-05,
2491
+ "loss": 0.1569,
2492
+ "step": 414
2493
+ },
2494
+ {
2495
+ "epoch": 2.53,
2496
+ "learning_rate": 2e-05,
2497
+ "loss": 0.1622,
2498
+ "step": 415
2499
+ },
2500
+ {
2501
+ "epoch": 2.54,
2502
+ "learning_rate": 2e-05,
2503
+ "loss": 0.1652,
2504
+ "step": 416
2505
+ },
2506
+ {
2507
+ "epoch": 2.54,
2508
+ "learning_rate": 2e-05,
2509
+ "loss": 0.167,
2510
+ "step": 417
2511
+ },
2512
+ {
2513
+ "epoch": 2.55,
2514
+ "learning_rate": 2e-05,
2515
+ "loss": 0.1511,
2516
+ "step": 418
2517
+ },
2518
+ {
2519
+ "epoch": 2.55,
2520
+ "learning_rate": 2e-05,
2521
+ "loss": 0.1372,
2522
+ "step": 419
2523
+ },
2524
+ {
2525
+ "epoch": 2.56,
2526
+ "learning_rate": 2e-05,
2527
+ "loss": 0.15,
2528
+ "step": 420
2529
+ },
2530
+ {
2531
+ "epoch": 2.57,
2532
+ "learning_rate": 2e-05,
2533
+ "loss": 0.1584,
2534
+ "step": 421
2535
+ },
2536
+ {
2537
+ "epoch": 2.57,
2538
+ "learning_rate": 2e-05,
2539
+ "loss": 0.1606,
2540
+ "step": 422
2541
+ },
2542
+ {
2543
+ "epoch": 2.58,
2544
+ "learning_rate": 2e-05,
2545
+ "loss": 0.1569,
2546
+ "step": 423
2547
+ },
2548
+ {
2549
+ "epoch": 2.59,
2550
+ "learning_rate": 2e-05,
2551
+ "loss": 0.1763,
2552
+ "step": 424
2553
+ },
2554
+ {
2555
+ "epoch": 2.59,
2556
+ "learning_rate": 2e-05,
2557
+ "loss": 0.1517,
2558
+ "step": 425
2559
+ },
2560
+ {
2561
+ "epoch": 2.6,
2562
+ "learning_rate": 2e-05,
2563
+ "loss": 0.1569,
2564
+ "step": 426
2565
+ },
2566
+ {
2567
+ "epoch": 2.6,
2568
+ "learning_rate": 2e-05,
2569
+ "loss": 0.1896,
2570
+ "step": 427
2571
+ },
2572
+ {
2573
+ "epoch": 2.61,
2574
+ "learning_rate": 2e-05,
2575
+ "loss": 0.1402,
2576
+ "step": 428
2577
+ },
2578
+ {
2579
+ "epoch": 2.62,
2580
+ "learning_rate": 2e-05,
2581
+ "loss": 0.1613,
2582
+ "step": 429
2583
+ },
2584
+ {
2585
+ "epoch": 2.62,
2586
+ "learning_rate": 2e-05,
2587
+ "loss": 0.1566,
2588
+ "step": 430
2589
+ },
2590
+ {
2591
+ "epoch": 2.63,
2592
+ "learning_rate": 2e-05,
2593
+ "loss": 0.1556,
2594
+ "step": 431
2595
+ },
2596
+ {
2597
+ "epoch": 2.63,
2598
+ "learning_rate": 2e-05,
2599
+ "loss": 0.1581,
2600
+ "step": 432
2601
+ },
2602
+ {
2603
+ "epoch": 2.64,
2604
+ "learning_rate": 2e-05,
2605
+ "loss": 0.1572,
2606
+ "step": 433
2607
+ },
2608
+ {
2609
+ "epoch": 2.65,
2610
+ "learning_rate": 2e-05,
2611
+ "loss": 0.1613,
2612
+ "step": 434
2613
+ },
2614
+ {
2615
+ "epoch": 2.65,
2616
+ "learning_rate": 2e-05,
2617
+ "loss": 0.1454,
2618
+ "step": 435
2619
+ },
2620
+ {
2621
+ "epoch": 2.66,
2622
+ "learning_rate": 2e-05,
2623
+ "loss": 0.176,
2624
+ "step": 436
2625
+ },
2626
+ {
2627
+ "epoch": 2.66,
2628
+ "learning_rate": 2e-05,
2629
+ "loss": 0.1788,
2630
+ "step": 437
2631
+ },
2632
+ {
2633
+ "epoch": 2.67,
2634
+ "learning_rate": 2e-05,
2635
+ "loss": 0.1685,
2636
+ "step": 438
2637
+ },
2638
+ {
2639
+ "epoch": 2.68,
2640
+ "learning_rate": 2e-05,
2641
+ "loss": 0.1452,
2642
+ "step": 439
2643
+ },
2644
+ {
2645
+ "epoch": 2.68,
2646
+ "learning_rate": 2e-05,
2647
+ "loss": 0.1402,
2648
+ "step": 440
2649
+ },
2650
+ {
2651
+ "epoch": 2.69,
2652
+ "learning_rate": 2e-05,
2653
+ "loss": 0.158,
2654
+ "step": 441
2655
+ },
2656
+ {
2657
+ "epoch": 2.7,
2658
+ "learning_rate": 2e-05,
2659
+ "loss": 0.1637,
2660
+ "step": 442
2661
+ },
2662
+ {
2663
+ "epoch": 2.7,
2664
+ "learning_rate": 2e-05,
2665
+ "loss": 0.1667,
2666
+ "step": 443
2667
+ },
2668
+ {
2669
+ "epoch": 2.71,
2670
+ "learning_rate": 2e-05,
2671
+ "loss": 0.1841,
2672
+ "step": 444
2673
+ },
2674
+ {
2675
+ "epoch": 2.71,
2676
+ "learning_rate": 2e-05,
2677
+ "loss": 0.1604,
2678
+ "step": 445
2679
+ },
2680
+ {
2681
+ "epoch": 2.72,
2682
+ "learning_rate": 2e-05,
2683
+ "loss": 0.1517,
2684
+ "step": 446
2685
+ },
2686
+ {
2687
+ "epoch": 2.73,
2688
+ "learning_rate": 2e-05,
2689
+ "loss": 0.1727,
2690
+ "step": 447
2691
+ },
2692
+ {
2693
+ "epoch": 2.73,
2694
+ "learning_rate": 2e-05,
2695
+ "loss": 0.1555,
2696
+ "step": 448
2697
+ },
2698
+ {
2699
+ "epoch": 2.74,
2700
+ "learning_rate": 2e-05,
2701
+ "loss": 0.1465,
2702
+ "step": 449
2703
+ },
2704
+ {
2705
+ "epoch": 2.74,
2706
+ "learning_rate": 2e-05,
2707
+ "loss": 0.1685,
2708
+ "step": 450
2709
+ }
2710
+ ],
2711
+ "max_steps": 656,
2712
+ "num_train_epochs": 4,
2713
+ "total_flos": 30697938616320.0,
2714
+ "trial_name": null,
2715
+ "trial_params": null
2716
+ }
llava-llama-2-13b-chat-forecasting-finetune/training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9d5ac233a90f6199ef502f1c1721ec8a2ed4585cb99941d004bde111d8a88279
3
+ size 6651
llava-llama-2-13b-chat-forecasting-finetune/zero_to_fp32.py ADDED
@@ -0,0 +1,578 @@
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright (c) Microsoft Corporation.
4
+ # SPDX-License-Identifier: Apache-2.0
5
+
6
+ # DeepSpeed Team
7
+
8
+ # This script extracts fp32 consolidated weights from ZeRO 2 and 3 DeepSpeed checkpoints. It gets
9
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
10
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
11
+ # application.
12
+ #
13
+ # example: python zero_to_fp32.py . pytorch_model.bin
14
+
15
+ import argparse
16
+ import torch
17
+ import glob
18
+ import math
19
+ import os
20
+ import re
21
+ from collections import OrderedDict
22
+ from dataclasses import dataclass
23
+
24
+ # while this script doesn't use deepspeed to recover data, the checkpoints are pickled with
25
+ # DeepSpeed data structures, so deepspeed has to be available in the current python environment.
26
+ from deepspeed.utils import logger
27
+ from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
28
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
29
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
30
+
31
+
32
+ @dataclass
33
+ class zero_model_state:
34
+ buffers: dict()
35
+ param_shapes: dict()
36
+ shared_params: list
37
+ ds_version: int
38
+ frozen_param_shapes: dict()
39
+ frozen_param_fragments: dict()
40
+
41
+
42
+ debug = 0
43
+
44
+ # load to cpu
45
+ device = torch.device('cpu')
46
+
47
+
48
+ def atoi(text):
49
+ return int(text) if text.isdigit() else text
50
+
51
+
52
+ def natural_keys(text):
53
+ '''
54
+ alist.sort(key=natural_keys) sorts in human order
55
+ http://nedbatchelder.com/blog/200712/human_sorting.html
56
+ (See Toothy's implementation in the comments)
57
+ '''
58
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
59
+
60
+
61
+ def get_model_state_file(checkpoint_dir, zero_stage):
62
+ if not os.path.isdir(checkpoint_dir):
63
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
64
+
65
+ # there should be only one file
66
+ if zero_stage == 2:
67
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
68
+ elif zero_stage == 3:
69
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
70
+
71
+ if not os.path.exists(file):
72
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
73
+
74
+ return file
75
+
76
+
77
+ def get_checkpoint_files(checkpoint_dir, glob_pattern):
78
+ # XXX: need to test that this simple glob rule works for multi-node setup too
79
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
80
+
81
+ if len(ckpt_files) == 0:
82
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
83
+
84
+ return ckpt_files
85
+
86
+
87
+ def get_optim_files(checkpoint_dir):
88
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
89
+
90
+
91
+ def get_model_state_files(checkpoint_dir):
92
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
93
+
94
+
95
+ def parse_model_states(files):
96
+ zero_model_states = []
97
+ for file in files:
98
+ state_dict = torch.load(file, map_location=device)
99
+
100
+ if BUFFER_NAMES not in state_dict:
101
+ raise ValueError(f"{file} is not a model state checkpoint")
102
+ buffer_names = state_dict[BUFFER_NAMES]
103
+ if debug:
104
+ print("Found buffers:", buffer_names)
105
+
106
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
107
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
108
+ param_shapes = state_dict[PARAM_SHAPES]
109
+
110
+ # collect parameters that are included in param_shapes
111
+ param_names = []
112
+ for s in param_shapes:
113
+ for name in s.keys():
114
+ param_names.append(name)
115
+
116
+ # update with frozen parameters
117
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
118
+ if frozen_param_shapes is not None:
119
+ if debug:
120
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
121
+ param_names += list(frozen_param_shapes.keys())
122
+
123
+ # handle shared params
124
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
125
+
126
+ ds_version = state_dict.get(DS_VERSION, None)
127
+
128
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
129
+
130
+ z_model_state = zero_model_state(buffers=buffers,
131
+ param_shapes=param_shapes,
132
+ shared_params=shared_params,
133
+ ds_version=ds_version,
134
+ frozen_param_shapes=frozen_param_shapes,
135
+ frozen_param_fragments=frozen_param_fragments)
136
+ zero_model_states.append(z_model_state)
137
+
138
+ return zero_model_states
139
+
140
+
141
+ def parse_optim_states(files, ds_checkpoint_dir):
142
+
143
+ total_files = len(files)
144
+ state_dicts = []
145
+ for f in files:
146
+ state_dicts.append(torch.load(f, map_location=device))
147
+
148
+ if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
149
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
150
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
151
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
152
+
153
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
154
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
155
+ # use the max of the partition_count to get the dp world_size.
156
+
157
+ if type(world_size) is list:
158
+ world_size = max(world_size)
159
+
160
+ if world_size != total_files:
161
+ raise ValueError(
162
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
163
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
164
+ )
165
+
166
+ # the groups are named differently in each stage
167
+ if zero_stage == 2:
168
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
169
+ elif zero_stage == 3:
170
+ fp32_groups_key = FP32_FLAT_GROUPS
171
+ else:
172
+ raise ValueError(f"unknown zero stage {zero_stage}")
173
+
174
+ if zero_stage == 2:
175
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
176
+ elif zero_stage == 3:
177
+ # if there is more than one param group, there will be multiple flattened tensors - one
178
+ # flattened tensor per group - for simplicity merge them into a single tensor
179
+ #
180
+ # XXX: could make the script more memory efficient for when there are multiple groups - it
181
+ # will require matching the sub-lists of param_shapes for each param group flattened tensor
182
+
183
+ fp32_flat_groups = [
184
+ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts))
185
+ ]
186
+
187
+ return zero_stage, world_size, fp32_flat_groups
188
+
189
+
190
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir):
191
+ """
192
+ Returns fp32 state_dict reconstructed from ds checkpoint
193
+
194
+ Args:
195
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
196
+
197
+ """
198
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
199
+
200
+ optim_files = get_optim_files(ds_checkpoint_dir)
201
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
202
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
203
+
204
+ model_files = get_model_state_files(ds_checkpoint_dir)
205
+
206
+ zero_model_states = parse_model_states(model_files)
207
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
208
+
209
+ if zero_stage == 2:
210
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states)
211
+ elif zero_stage == 3:
212
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states)
213
+
214
+
215
+ def _zero2_merge_frozen_params(state_dict, zero_model_states):
216
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
217
+ return
218
+
219
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
220
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
221
+
222
+ if debug:
223
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
224
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
225
+
226
+ wanted_params = len(frozen_param_shapes)
227
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
228
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
229
+ print(f'Frozen params: Have {avail_numel} numels to process.')
230
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
231
+
232
+ total_params = 0
233
+ total_numel = 0
234
+ for name, shape in frozen_param_shapes.items():
235
+ total_params += 1
236
+ unpartitioned_numel = shape.numel()
237
+ total_numel += unpartitioned_numel
238
+
239
+ state_dict[name] = frozen_param_fragments[name]
240
+
241
+ if debug:
242
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
243
+
244
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
245
+
246
+
247
+ def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
248
+ param_shapes = zero_model_states[0].param_shapes
249
+
250
+ # Reconstruction protocol:
251
+ #
252
+ # XXX: document this
253
+
254
+ if debug:
255
+ for i in range(world_size):
256
+ for j in range(len(fp32_flat_groups[0])):
257
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
258
+
259
+ # XXX: memory usage doubles here (zero2)
260
+ num_param_groups = len(fp32_flat_groups[0])
261
+ merged_single_partition_of_fp32_groups = []
262
+ for i in range(num_param_groups):
263
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
264
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
265
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
266
+ avail_numel = sum(
267
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
268
+
269
+ if debug:
270
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
271
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
272
+ # not asserting if there is a mismatch due to possible padding
273
+ print(f"Have {avail_numel} numels to process.")
274
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
275
+
276
+ # params
277
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
278
+ # out-of-core computing solution
279
+ total_numel = 0
280
+ total_params = 0
281
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
282
+ offset = 0
283
+ avail_numel = full_single_fp32_vector.numel()
284
+ for name, shape in shapes.items():
285
+
286
+ unpartitioned_numel = shape.numel()
287
+ total_numel += unpartitioned_numel
288
+ total_params += 1
289
+
290
+ if debug:
291
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
292
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
293
+ offset += unpartitioned_numel
294
+
295
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
296
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
297
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
298
+ # live optimizer object, so we are checking that the numbers are within the right range
299
+ align_to = 2 * world_size
300
+
301
+ def zero2_align(x):
302
+ return align_to * math.ceil(x / align_to)
303
+
304
+ if debug:
305
+ print(f"original offset={offset}, avail_numel={avail_numel}")
306
+
307
+ offset = zero2_align(offset)
308
+ avail_numel = zero2_align(avail_numel)
309
+
310
+ if debug:
311
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
312
+
313
+ # Sanity check
314
+ if offset != avail_numel:
315
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
316
+
317
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
318
+
319
+
320
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states):
321
+ state_dict = OrderedDict()
322
+
323
+ # buffers
324
+ buffers = zero_model_states[0].buffers
325
+ state_dict.update(buffers)
326
+ if debug:
327
+ print(f"added {len(buffers)} buffers")
328
+
329
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
330
+
331
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
332
+
333
+ # recover shared parameters
334
+ for pair in zero_model_states[0].shared_params:
335
+ if pair[1] in state_dict:
336
+ state_dict[pair[0]] = state_dict[pair[1]]
337
+
338
+ return state_dict
339
+
340
+
341
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
342
+ remainder = unpartitioned_numel % world_size
343
+ padding_numel = (world_size - remainder) if remainder else 0
344
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
345
+ return partitioned_numel, padding_numel
346
+
347
+
348
+ def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
349
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
350
+ return
351
+
352
+ if debug:
353
+ for i in range(world_size):
354
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
355
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
356
+
357
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
358
+ wanted_params = len(frozen_param_shapes)
359
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
360
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
361
+ print(f'Frozen params: Have {avail_numel} numels to process.')
362
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
363
+
364
+ total_params = 0
365
+ total_numel = 0
366
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
367
+ total_params += 1
368
+ unpartitioned_numel = shape.numel()
369
+ total_numel += unpartitioned_numel
370
+
371
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
372
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
373
+
374
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
375
+
376
+ if debug:
377
+ print(
378
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
379
+ )
380
+
381
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
382
+
383
+
384
+ def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
385
+ param_shapes = zero_model_states[0].param_shapes
386
+ avail_numel = fp32_flat_groups[0].numel() * world_size
387
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
388
+ # param, re-consolidating each param, while dealing with padding if any
389
+
390
+ # merge list of dicts, preserving order
391
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
392
+
393
+ if debug:
394
+ for i in range(world_size):
395
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
396
+
397
+ wanted_params = len(param_shapes)
398
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
399
+ # not asserting if there is a mismatch due to possible padding
400
+ avail_numel = fp32_flat_groups[0].numel() * world_size
401
+ print(f"Trainable params: Have {avail_numel} numels to process.")
402
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
403
+
404
+ # params
405
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
406
+ # out-of-core computing solution
407
+ offset = 0
408
+ total_numel = 0
409
+ total_params = 0
410
+ for name, shape in param_shapes.items():
411
+
412
+ unpartitioned_numel = shape.numel()
413
+ total_numel += unpartitioned_numel
414
+ total_params += 1
415
+
416
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
417
+
418
+ if debug:
419
+ print(
420
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
421
+ )
422
+
423
+ # XXX: memory usage doubles here
424
+ state_dict[name] = torch.cat(
425
+ tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)),
426
+ 0).narrow(0, 0, unpartitioned_numel).view(shape)
427
+ offset += partitioned_numel
428
+
429
+ offset *= world_size
430
+
431
+ # Sanity check
432
+ if offset != avail_numel:
433
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
434
+
435
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
436
+
437
+
438
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states):
439
+ state_dict = OrderedDict()
440
+
441
+ # buffers
442
+ buffers = zero_model_states[0].buffers
443
+ state_dict.update(buffers)
444
+ if debug:
445
+ print(f"added {len(buffers)} buffers")
446
+
447
+ _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
448
+
449
+ _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
450
+
451
+ # recover shared parameters
+     for pair in zero_model_states[0].shared_params:
+         if pair[1] in state_dict:
+             state_dict[pair[0]] = state_dict[pair[1]]
+
+     return state_dict
+
+
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None):
+     """
+     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+     ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+     via a model hub.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the ``latest`` file, e.g., ``global_step14``
+
+     Returns:
+         - pytorch ``state_dict``
+
+     Note: this approach may not work if your application doesn't have sufficient free CPU memory, and
+     you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+     the checkpoint.
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+         # do the training and checkpoint saving
+         state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir)  # already on cpu
+         model = model.cpu()  # move to cpu
+         model.load_state_dict(state_dict)
+         # submit to model hub or save the model to share with others
+
+     In this example the ``model`` will no longer be usable in the deepspeed context of the same
+     application, i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+     """
+     if tag is None:
+         latest_path = os.path.join(checkpoint_dir, 'latest')
+         if os.path.isfile(latest_path):
+             with open(latest_path, 'r') as fd:
+                 tag = fd.read().strip()
+         else:
+             raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+     ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+     if not os.path.isdir(ds_checkpoint_dir):
+         raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+     return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir)
+
+
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None):
+     """
+     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+     loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+         - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin)
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
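+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import convert_zero_checkpoint_to_fp32_state_dict
+         convert_zero_checkpoint_to_fp32_state_dict("path/checkpoint-12", "path/checkpoint-12/pytorch_model.bin")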
+     """
+
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+     print(f"Saving fp32 state dict to {output_file}")
+     torch.save(state_dict, output_file)
+
+
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+     """
+     1. Put the provided model on cpu
+     2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+     3. Load it into the provided model
+
+     Args:
+         - ``model``: the model object to update
+         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+     Returns:
+         - ``model``: modified model
+
+     Make sure you have plenty of CPU memory available before you call this function. If you don't
+     have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+     conveniently placed for you in the checkpoint folder.
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+         model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+         # submit to model hub or save the model to share with others
+
+     Note that once this has been run, the ``model`` will no longer be usable in the deepspeed context
+     of the same application, i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     """
+     logger.info(f"Extracting fp32 weights")
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+     logger.info(f"Overwriting model with fp32 weights")
+     model = model.cpu()
+     model.load_state_dict(state_dict, strict=False)
+
+     return model
+
+
+ if __name__ == "__main__":
+
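+     # Example: python zero_to_fp32.py path/checkpoint-12 path/checkpoint-12/pytorch_model.bin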
+     parser = argparse.ArgumentParser()
+     parser.add_argument("checkpoint_dir",
+                         type=str,
+                         help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+     parser.add_argument(
+         "output_file",
+         type=str,
+         help="path to the pytorch fp32 state_dict output file (e.g. path/checkpoint-12/pytorch_model.bin)")
+     parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+     args = parser.parse_args()
+
+     debug = args.debug
+
+     convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file)
scaleup_training_ego4d_eval.ckpt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d7be3d661a26ae03d4621bc9e0231a0b2be7cef9bf4693743265eb2cac4578c7
+ size 7779511314