Athipan01 committed
Commit 5e2150b · verified · 1 Parent(s): a4fcd92

End of training

Files changed (4)
  1. README.md +5 -5
  2. adapter_model.safetensors +1 -1
  3. config.json +45 -0
  4. tokenizer.json +1 -6
README.md CHANGED
@@ -15,15 +15,15 @@ should probably proofread and complete it, then remove this comment. -->
 
  This model is a fine-tuned version of [microsoft/codebert-base](https://huggingface.co/microsoft/codebert-base) on an unknown dataset.
  It achieves the following results on the evaluation set:
- - eval_loss: 1.7713
- - eval_model_preparation_time: 0.0047
+ - eval_loss: 1.9961
+ - eval_model_preparation_time: 0.0041
  - eval_accuracy: 0.095
  - eval_f1: 0.0165
  - eval_precision: 0.0090
  - eval_recall: 0.095
- - eval_runtime: 17.3603
- - eval_samples_per_second: 11.521
- - eval_steps_per_second: 2.88
+ - eval_runtime: 17.4943
+ - eval_samples_per_second: 11.432
+ - eval_steps_per_second: 2.858
  - step: 0
 
  ## Model description
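The README hunk above swaps in the latest run's evaluation numbers (loss, model-preparation time, runtime, throughput) while the classification scores stay unchanged. For reference, here is a minimal sketch of a `compute_metrics` callback that would produce accuracy/F1/precision/recall fields like these when passed to `transformers.Trainer`; the weighted averaging and the function itself are assumptions, not taken from this repository's training code.

```python
# Hypothetical metrics callback for Trainer(..., compute_metrics=compute_metrics).
# The "weighted" averaging is an assumption, not confirmed by this commit.
import numpy as np
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)  # pick the highest-scoring of the 6 labels
    precision, recall, f1, _ = precision_recall_fscore_support(
        labels, preds, average="weighted", zero_division=0
    )
    return {
        "accuracy": accuracy_score(labels, preds),
        "f1": f1,
        "precision": precision,
        "recall": recall,
    }
```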
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:506a013407f9383136aea67e895e1ff397c16217173a422fc2e69ce13a28c36f
+ oid sha256:f449724978f5f984b9964f2b3869b19670329c0ccc11285534346796563d4af7
  size 4747504
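The safetensors change above is only the Git LFS pointer picking up a new object hash for the retrained adapter weights; the payload size is unchanged. Assuming the adapter was produced with the PEFT library (adapter_model.safetensors is PEFT's default filename, though the commit itself does not say so), loading it on top of the base checkpoint could look roughly like this; the repository id is a placeholder.

```python
# Hedged sketch: assumes a PEFT adapter and a sequence-classification head.
from peft import PeftModel
from transformers import AutoModelForSequenceClassification

ADAPTER_REPO = "<this-repo-id>"  # placeholder, not taken from the commit

base = AutoModelForSequenceClassification.from_pretrained(
    "microsoft/codebert-base", num_labels=6
)
model = PeftModel.from_pretrained(base, ADAPTER_REPO)  # attach the adapter weights
```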
config.json ADDED
@@ -0,0 +1,45 @@
+ {
+   "_attn_implementation_autoset": true,
+   "architectures": [
+     "RobertaModel"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2",
+     "3": "LABEL_3",
+     "4": "LABEL_4",
+     "5": "LABEL_5"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2,
+     "LABEL_3": 3,
+     "LABEL_4": 4,
+     "LABEL_5": 5
+   },
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "output_past": true,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "problem_type": "single_label_classification",
+   "torch_dtype": "float32",
+   "transformers_version": "4.51.3",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 50265
+ }
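The new config.json describes a 6-label, single-label-classification setup on the standard codebert-base geometry (12 layers, 12 heads, hidden size 768, RoBERTa-style vocabulary). A minimal sketch of materializing an equivalent classifier with transformers follows; starting from microsoft/codebert-base is an assumption about intended use, not something the commit states.

```python
# Build a 6-way single-label classifier matching the committed config.
from transformers import AutoConfig, AutoModelForSequenceClassification

config = AutoConfig.from_pretrained(
    "microsoft/codebert-base",
    num_labels=6,
    problem_type="single_label_classification",
)
# The classification head is freshly initialized here; the trained weights
# in this repo live in the adapter file, not in the base checkpoint.
model = AutoModelForSequenceClassification.from_pretrained(
    "microsoft/codebert-base", config=config
)
```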
tokenizer.json CHANGED
@@ -1,11 +1,6 @@
  {
    "version": "1.0",
-   "truncation": {
-     "direction": "Right",
-     "max_length": 128,
-     "strategy": "LongestFirst",
-     "stride": 0
-   },
+   "truncation": null,
    "padding": null,
    "added_tokens": [
      {
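The tokenizer change drops the truncation rule previously baked into tokenizer.json (right-side truncation at 128 tokens) and stores "truncation": null instead, so truncation now has to be requested per call. A hedged sketch, with the repository id as a placeholder and max_length=128 mirroring the removed default:

```python
# After this commit the saved tokenizer carries no default truncation,
# so callers must pass truncation/max_length explicitly.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("<this-repo-id>")  # placeholder repo id

enc = tokenizer(
    "def add(a, b): return a + b",
    truncation=True,      # re-enable truncation at call time
    max_length=128,       # mirrors the value removed from tokenizer.json
    padding="max_length",
    return_tensors="pt",
)
```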