Upload folder using huggingface_hub
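A commit titled this way is normally produced by huggingface_hub's upload_folder API, which packs a whole local directory into a single commit and routes large files through Git LFS. A minimal sketch, assuming a hypothetical local path and repo id (neither is named on this page):

from huggingface_hub import HfApi

api = HfApi()
# One commit for the whole run directory; large .pt files go through Git LFS.
api.upload_folder(
    folder_path="runs/llama2-no-cotraining+7b",  # assumed local path
    repo_id="<user>/<repo>",                     # placeholder repo id
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)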
llama2-no-cotraining+7b/checkpoints/latest-checkpoint.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd8e17767d6306b2ea80066876eaec1fb659a1f5ff91a4b4b1e99f8ea248d8a1
+size 13519939694
llama2-no-cotraining+7b/config.json
ADDED
@@ -0,0 +1,59 @@
+{
+  "dataset": {
+    "align_stage_components": [
+      "download/llava-laion-cc-sbu-558k/chat.json",
+      "download/llava-laion-cc-sbu-558k"
+    ],
+    "dataset_id": "llava-multimodal",
+    "dataset_root_dir": "data",
+    "finetune_stage_components": [
+      "download/llava-v1.5-instruct/llava_v1_5_stripped625k.json",
+      "download/llava-v1.5-instruct"
+    ],
+    "type": "llava-multimodal"
+  },
+  "hf_token": ".hf_token",
+  "model": {
+    "align_epochs": 1,
+    "align_global_batch_size": 256,
+    "align_learning_rate": 0.001,
+    "align_lr_scheduler_type": "linear-warmup+cosine-decay",
+    "align_max_grad_norm": 1.0,
+    "align_max_steps": null,
+    "align_per_device_batch_size": 16,
+    "align_train_strategy": "fsdp-shard-grad-op",
+    "align_warmup_ratio": 0.03,
+    "align_weight_decay": 0.0,
+    "arch_specifier": "no-align+gelu-mlp",
+    "enable_gradient_checkpointing": true,
+    "enable_mixed_precision_training": true,
+    "finetune_epochs": 1,
+    "finetune_global_batch_size": 128,
+    "finetune_learning_rate": 2e-05,
+    "finetune_lr_scheduler_type": "linear-warmup+cosine-decay",
+    "finetune_max_grad_norm": 1.0,
+    "finetune_max_steps": null,
+    "finetune_per_device_batch_size": 16,
+    "finetune_train_strategy": "fsdp-full-shard",
+    "finetune_warmup_ratio": 0.03,
+    "finetune_weight_decay": 0.1,
+    "image_resize_strategy": "letterbox",
+    "llm_backbone_id": "llama2-7b-pure",
+    "llm_max_length": 2048,
+    "model_id": "llama2-no-cotraining+7b",
+    "reduce_in_full_precision": false,
+    "type": "llama2-no-cotraining+7b",
+    "vision_backbone_id": "clip-vit-l-336px"
+  },
+  "pretrained_checkpoint": null,
+  "run_id": "llama2-no-cotraining+7b",
+  "run_root_dir": "runs",
+  "seed": 7,
+  "stage": "finetune",
+  "trackers": [
+    "jsonl",
+    "wandb"
+  ],
+  "wandb_entity": null,
+  "wandb_project": "prismatic"
+}
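As a worked example of the batch-size fields above: with finetune_global_batch_size 128 and finetune_per_device_batch_size 16, gradient accumulation works out to 128 / (16 × num_gpus) steps, i.e. no accumulation on 8 GPUs. A minimal sketch that reads the config with the standard json module (the GPU count is an assumption; it is not recorded in the file):

import json

with open("llama2-no-cotraining+7b/config.json") as f:
    cfg = json.load(f)

model = cfg["model"]
num_gpus = 8  # assumed; the config does not record the device count
accum_steps = model["finetune_global_batch_size"] // (
    model["finetune_per_device_batch_size"] * num_gpus
)
print(accum_steps)  # 128 // (16 * 8) == 1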
llama2-no-cotraining+7b/config.yaml
ADDED
@@ -0,0 +1,52 @@
+dataset:
+  align_stage_components:
+  - download/llava-laion-cc-sbu-558k/chat.json
+  - download/llava-laion-cc-sbu-558k
+  dataset_id: llava-multimodal
+  dataset_root_dir: data
+  finetune_stage_components:
+  - download/llava-v1.5-instruct/llava_v1_5_stripped625k.json
+  - download/llava-v1.5-instruct
+  type: llava-multimodal
+hf_token: .hf_token
+model:
+  align_epochs: 1
+  align_global_batch_size: 256
+  align_learning_rate: 0.001
+  align_lr_scheduler_type: linear-warmup+cosine-decay
+  align_max_grad_norm: 1.0
+  align_max_steps: null
+  align_per_device_batch_size: 16
+  align_train_strategy: fsdp-shard-grad-op
+  align_warmup_ratio: 0.03
+  align_weight_decay: 0.0
+  arch_specifier: no-align+gelu-mlp
+  enable_gradient_checkpointing: true
+  enable_mixed_precision_training: true
+  finetune_epochs: 1
+  finetune_global_batch_size: 128
+  finetune_learning_rate: 2.0e-05
+  finetune_lr_scheduler_type: linear-warmup+cosine-decay
+  finetune_max_grad_norm: 1.0
+  finetune_max_steps: null
+  finetune_per_device_batch_size: 16
+  finetune_train_strategy: fsdp-full-shard
+  finetune_warmup_ratio: 0.03
+  finetune_weight_decay: 0.1
+  image_resize_strategy: letterbox
+  llm_backbone_id: llama2-7b-pure
+  llm_max_length: 2048
+  model_id: llama2-no-cotraining+7b
+  reduce_in_full_precision: false
+  type: llama2-no-cotraining+7b
+  vision_backbone_id: clip-vit-l-336px
+pretrained_checkpoint: null
+run_id: llama2-no-cotraining+7b
+run_root_dir: runs
+seed: 7
+stage: finetune
+trackers:
+- jsonl
+- wandb
+wandb_entity: null
+wandb_project: prismatic
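config.yaml mirrors config.json field for field (the YAML writes the learning rate as 2.0e-05, which parses to the same float as the JSON's 2e-05), so the two can be cross-checked; a sketch assuming PyYAML is available:

import json

import yaml

with open("llama2-no-cotraining+7b/config.json") as f:
    cfg_json = json.load(f)
with open("llama2-no-cotraining+7b/config.yaml") as f:
    cfg_yaml = yaml.safe_load(f)

# Both files describe the same run, so the parsed trees should be identical.
assert cfg_json == cfg_yaml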
llama2-no-cotraining+7b/run-metrics.jsonl
ADDED
The diff for this file is too large to render; see the raw diff.
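run-metrics.jsonl is the output of the jsonl tracker named in the config: one JSON object per logged step. Its keys are not visible here, but any JSONL file can be read the same way; a minimal sketch:

import json

with open("llama2-no-cotraining+7b/run-metrics.jsonl") as f:
    records = [json.loads(line) for line in f if line.strip()]

print(f"{len(records)} logged entries")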