Upload TFRobertaForQuestionAnswering
- README.md +55 -0
- config.json +26 -0
- tf_model.h5 +3 -0
    	
README.md ADDED

@@ -0,0 +1,55 @@
+---
+license: apache-2.0
+base_model: distilroberta-base
+tags:
+- generated_from_keras_callback
+model-index:
+- name: transformers-qa-2
+  results: []
+---
+
+<!-- This model card has been generated automatically according to the information Keras had access to. You should
+probably proofread and complete it, then remove this comment. -->
+
+# transformers-qa-2
+
+This model is a fine-tuned version of [distilroberta-base](https://huggingface.co/distilroberta-base) on an unknown dataset.
+It achieves the following results on the evaluation set:
+- Train Loss: 0.9500
+- Validation Loss: 1.1014
+- Epoch: 1
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': 3e-05, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False}
+- training_precision: mixed_float16
+
+### Training results
+
+| Train Loss | Validation Loss | Epoch |
+|:----------:|:---------------:|:-----:|
+| 1.4034     | 1.1588          | 0     |
+| 0.9500     | 1.1014          | 1     |
+
+
+### Framework versions
+
+- Transformers 4.35.2
+- TensorFlow 2.15.0
+- Datasets 2.16.1
+- Tokenizers 0.15.0
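For readers who want to reproduce this setup, below is a minimal Keras sketch of the optimizer and precision settings recorded in the card above. The dataset objects are placeholders, and any warmup or decay schedule used in the actual run is not recorded in the card, so a fixed learning rate is assumed.

```python
import tensorflow as tf
from transformers import TFAutoModelForQuestionAnswering

# "training_precision: mixed_float16" from the model card.
tf.keras.mixed_precision.set_global_policy("mixed_float16")

# Adam settings copied from the optimizer dict in the card.
optimizer = tf.keras.optimizers.Adam(
    learning_rate=3e-5,
    beta_1=0.9,
    beta_2=0.999,
    epsilon=1e-07,
    amsgrad=False,
)

# Starting checkpoint named in the card; the QA head is freshly initialized.
model = TFAutoModelForQuestionAnswering.from_pretrained("distilroberta-base")

# Transformers TF models compute their own start/end-position loss when
# compiled without an explicit loss function.
model.compile(optimizer=optimizer)

# train_dataset / validation_dataset are placeholders for tokenized tf.data pipelines.
# model.fit(train_dataset, validation_data=validation_dataset, epochs=2)
```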
    	
config.json ADDED

@@ -0,0 +1,26 @@
+{
+  "_name_or_path": "distilroberta-base",
+  "architectures": [
+    "RobertaForQuestionAnswering"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "roberta",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 6,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "transformers_version": "4.35.2",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 50265
+}
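Together with the tf_model.h5 weights below, this config is what from_pretrained uses to rebuild the network. A minimal TensorFlow inference sketch follows; the repository id is a placeholder taken from the model-card name, since the actual Hub path is not shown in this commit.

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFRobertaForQuestionAnswering

# Placeholder repo id; substitute the real namespace/name of this repository.
model_id = "transformers-qa-2"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = TFRobertaForQuestionAnswering.from_pretrained(model_id)

question = "What was the base checkpoint?"
context = "transformers-qa-2 was fine-tuned from distilroberta-base for extractive question answering."

# Encode the question/context pair and run a forward pass.
inputs = tokenizer(question, context, return_tensors="tf")
outputs = model(**inputs)

# The most likely start/end token positions define the answer span.
start = int(tf.argmax(outputs.start_logits, axis=-1)[0])
end = int(tf.argmax(outputs.end_logits, axis=-1)[0])
answer = tokenizer.decode(inputs["input_ids"][0][start : end + 1])
print(answer)
```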
    	
tf_model.h5 ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2a56e774806c72b003eacc0cabcb6d0d5799a40e338338aeaf1dfaee220fcbd9
+size 326264328
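The three lines above are a Git LFS pointer, not the model itself: the roughly 326 MB tf_model.h5 identified by that sha256 digest is stored in LFS and fetched on demand. A short sketch of retrieving just this file with huggingface_hub, again with a placeholder repository id:

```python
from huggingface_hub import hf_hub_download

# Placeholder repo id; replace with this repository's actual namespace/name.
weights_path = hf_hub_download(repo_id="transformers-qa-2", filename="tf_model.h5")
print(weights_path)  # local path to the cached ~326 MB HDF5 weights file
```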