Ina-Developer committed on
Commit 186dc5a · verified · 1 Parent(s): 4bd2928

Training in progress, epoch 1

README.md CHANGED
@@ -34,11 +34,11 @@ This model was trained with SFT.
 
 ### Framework versions
 
- - TRL: 0.15.2
- - Transformers: 4.51.3
- - Pytorch: 2.7.0
- - Datasets: 3.6.0
- - Tokenizers: 0.21.1
+ - TRL: 0.22.1
+ - Transformers: 4.55.3
+ - Pytorch: 2.8.0
+ - Datasets: 4.0.0
+ - Tokenizers: 0.21.4
 
 ## Citations
 
@@ -49,7 +49,7 @@ Cite TRL as:
 ```bibtex
 @misc{vonwerra2022trl,
     title = {{TRL: Transformer Reinforcement Learning}},
-    author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallouédec},
+    author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang and Kashif Rasul and Quentin Gallou{\'e}dec},
     year = 2020,
     journal = {GitHub repository},
     publisher = {GitHub},
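
The version bumps above track the stack this epoch was trained with. The training script itself is not part of this commit; the sketch below only illustrates how an SFT + LoRA run is typically wired up with TRL in the 0.22.x line. The base model id, dataset, and hyperparameters are placeholders, not values taken from this repository.

```python
# Illustrative only: an SFT + LoRA setup against TRL ~0.22 / Transformers ~4.55.
# Base model, dataset, and hyperparameters are placeholders, not this repo's values.
from datasets import load_dataset
from peft import LoraConfig
from trl import SFTConfig, SFTTrainer

train_dataset = load_dataset("trl-lib/Capybara", split="train")  # placeholder dataset

peft_config = LoraConfig(
    r=16,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
    task_type="CAUSAL_LM",
)

trainer = SFTTrainer(
    model="google/gemma-3-4b-it",                      # placeholder base model
    train_dataset=train_dataset,
    args=SFTConfig(output_dir="sft-output", num_train_epochs=1),
    peft_config=peft_config,
)
trainer.train()
```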
adapter_config.json CHANGED
@@ -23,23 +23,26 @@
     "embed_tokens"
   ],
   "peft_type": "LORA",
+  "qalora_group_size": 16,
   "r": 16,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
+    "k_proj",
+    "fc2",
+    "gate_proj",
+    "q_proj",
+    "out_proj",
     "down_proj",
     "o_proj",
     "up_proj",
-    "fc2",
-    "out_proj",
-    "q_proj",
-    "gate_proj",
-    "k_proj",
-    "fc1",
-    "v_proj"
+    "v_proj",
+    "fc1"
   ],
+  "target_parameters": null,
   "task_type": "CAUSAL_LM",
   "trainable_token_indices": null,
   "use_dora": false,
+  "use_qalora": false,
   "use_rslora": false
 }
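
The target-module set is unchanged apart from ordering; the newly added keys (qalora_group_size, target_parameters, use_qalora) appear to be defaults written by the newer PEFT release. For reference, a PEFT LoraConfig that serializes to a layout like the updated adapter_config.json might look as follows. This is a reconstruction, not the repo's training code: lora_alpha and modules_to_save are not visible in this hunk, so those values are assumptions.

```python
# Reconstruction of a LoraConfig matching the updated adapter_config.json above.
# lora_alpha and modules_to_save are NOT shown in the diff hunk; values here are
# assumptions for illustration only.
from peft import LoraConfig

lora_config = LoraConfig(
    r=16,
    lora_alpha=32,                       # assumption: not visible in this hunk
    target_modules=[
        "k_proj", "fc2", "gate_proj", "q_proj", "out_proj",
        "down_proj", "o_proj", "up_proj", "v_proj", "fc1",
    ],
    modules_to_save=["embed_tokens"],    # "embed_tokens" appears just above the hunk
    task_type="CAUSAL_LM",
    use_rslora=False,
    use_dora=False,
)
print(sorted(lora_config.target_modules))
```

The fc1, fc2, and out_proj entries are typical vision-encoder projection names, which suggests the adapter touches the vision tower as well as the language-model attention and MLP blocks.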
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
   version https://git-lfs.github.com/spec/v1
- oid sha256:fc5874ea6d71b608a376620ce61260e21d8a356f9aa3c01dc0897ea232b21eea
- size 6127551176
+ oid sha256:3609043850aeb9ade01488f331627cea9b173a9660d6c8e05044f755241b58f3
+ size 6127553104
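
The new LFS pointer above is the updated adapter checkpoint for this epoch. A minimal loading sketch, assuming a causal-LM base model; the actual base model id is recorded in adapter_config.json under base_model_name_or_path and is not shown in this diff, so the id below is a placeholder.

```python
# Minimal sketch of loading the updated adapter; ids and paths are placeholders.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM

base = AutoModelForCausalLM.from_pretrained(
    "google/gemma-3-4b-it",              # placeholder: see base_model_name_or_path
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
model = PeftModel.from_pretrained(base, "<adapter-repo-or-local-checkpoint>")
model.eval()
```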
chat_template.jinja ADDED
@@ -0,0 +1,47 @@
+ {{ bos_token }}
+ {%- if messages[0]['role'] == 'system' -%}
+ {%- if messages[0]['content'] is string -%}
+ {%- set first_user_prefix = messages[0]['content'] + '
+
+ ' -%}
+ {%- else -%}
+ {%- set first_user_prefix = messages[0]['content'][0]['text'] + '
+
+ ' -%}
+ {%- endif -%}
+ {%- set loop_messages = messages[1:] -%}
+ {%- else -%}
+ {%- set first_user_prefix = "" -%}
+ {%- set loop_messages = messages -%}
+ {%- endif -%}
+ {%- for message in loop_messages -%}
+ {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}
+ {{ raise_exception("Conversation roles must alternate user/assistant/user/assistant/...") }}
+ {%- endif -%}
+ {%- if (message['role'] == 'assistant') -%}
+ {%- set role = "model" -%}
+ {%- else -%}
+ {%- set role = message['role'] -%}
+ {%- endif -%}
+ {{ '<start_of_turn>' + role + '
+ ' + (first_user_prefix if loop.first else "") }}
+ {%- if message['content'] is string -%}
+ {{ message['content'] | trim }}
+ {%- elif message['content'] is iterable -%}
+ {%- for item in message['content'] -%}
+ {%- if item['type'] == 'image' -%}
+ {{ '<start_of_image>' }}
+ {%- elif item['type'] == 'text' -%}
+ {{ item['text'] | trim }}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- else -%}
+ {{ raise_exception("Invalid content type") }}
+ {%- endif -%}
+ {{ '<end_of_turn>
+ ' }}
+ {%- endfor -%}
+ {%- if add_generation_prompt -%}
+ {{'<start_of_turn>model
+ '}}
+ {%- endif -%}
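
The added chat_template.jinja is a Gemma-style turn format: the assistant role is rendered as "model", a leading system message is folded into the first user turn, and turns are delimited by <start_of_turn> / <end_of_turn>. A minimal rendering sketch (the checkpoint path is a placeholder):

```python
# Rendering the added chat template; the checkpoint path is a placeholder.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("<this-repo-or-local-path>")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# Roughly:
# <bos><start_of_turn>user
# You are a helpful assistant.
#
# Hello!<end_of_turn>
# <start_of_turn>model
```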
runs/Sep03_14-23-36_safe-gpu02/events.out.tfevents.1756902222.safe-gpu02.2768505.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ab7de18153e79d469ced73b8e91a74bedf81f289625b32b2a60e81e19ebecd6
+ size 8547
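
The added tfevents file holds the TensorBoard logs written during this epoch. One way to read it without launching the TensorBoard UI; the scalar tag names are an assumption (the Hugging Face Trainer usually logs train/loss and related tags):

```python
# Reading the epoch-1 event file directly; tag names are assumptions.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

ea = EventAccumulator("runs/Sep03_14-23-36_safe-gpu02")
ea.Reload()
print(ea.Tags()["scalars"])              # e.g. ['train/loss', 'train/learning_rate', ...]
for event in ea.Scalars("train/loss"):   # assumed tag
    print(event.step, event.value)
```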
tokenizer_config.json CHANGED
@@ -51325,7 +51325,6 @@
   },
   "boi_token": "<start_of_image>",
   "bos_token": "<bos>",
-  "chat_template": "{{ bos_token }}\n{%- if messages[0]['role'] == 'system' -%}\n {%- if messages[0]['content'] is string -%}\n {%- set first_user_prefix = messages[0]['content'] + '\n\n' -%}\n {%- else -%}\n {%- set first_user_prefix = messages[0]['content'][0]['text'] + '\n\n' -%}\n {%- endif -%}\n {%- set loop_messages = messages[1:] -%}\n{%- else -%}\n {%- set first_user_prefix = \"\" -%}\n {%- set loop_messages = messages -%}\n{%- endif -%}\n{%- for message in loop_messages -%}\n {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}\n {{ raise_exception(\"Conversation roles must alternate user/assistant/user/assistant/...\") }}\n {%- endif -%}\n {%- if (message['role'] == 'assistant') -%}\n {%- set role = \"model\" -%}\n {%- else -%}\n {%- set role = message['role'] -%}\n {%- endif -%}\n {{ '<start_of_turn>' + role + '\n' + (first_user_prefix if loop.first else \"\") }}\n {%- if message['content'] is string -%}\n {{ message['content'] | trim }}\n {%- elif message['content'] is iterable -%}\n {%- for item in message['content'] -%}\n {%- if item['type'] == 'image' -%}\n {{ '<start_of_image>' }}\n {%- elif item['type'] == 'text' -%}\n {{ item['text'] | trim }}\n {%- endif -%}\n {%- endfor -%}\n {%- else -%}\n {{ raise_exception(\"Invalid content type\") }}\n {%- endif -%}\n {{ '<end_of_turn>\n' }}\n{%- endfor -%}\n{%- if add_generation_prompt -%}\n {{'<start_of_turn>model\n'}}\n{%- endif -%}\n",
   "clean_up_tokenization_spaces": false,
   "eoi_token": "<end_of_image>",
   "eos_token": "<eos>",
training_args.bin CHANGED
@@ -1,3 +1,3 @@
   version https://git-lfs.github.com/spec/v1
- oid sha256:18bdf9405f11a5737728678dd1164a9a83c967bdb6e5555220407da084cd3017
- size 6033
+ oid sha256:0181900e83682b37e9af3ada77ac9e48810c83e28746481453d0ac522c8d1a3c
+ size 6161
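
training_args.bin is the pickled training configuration (a TrainingArguments / SFTConfig object); the small size change simply reflects the newer TRL/Transformers versions serializing a few extra fields. It can be inspected locally, for example:

```python
# training_args.bin is a pickled config object, not tensors, so weights_only
# must be disabled. Only load files from sources you trust.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)               # e.g. SFTConfig
print(args.num_train_epochs, args.learning_rate)
```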