add model

- config.json +31 -0
- modeling_roberta.py +24 -0
- pytorch_model.bin +3 -0
config.json ADDED
@@ -0,0 +1,31 @@
{
  "_name_or_path": "/content/drive/MyDrive/ColabModels/XROBERTA_USE_QA/pytorch_model/",
  "architectures": [
    "XLMRobertaModel"
  ],
  "attention_probs_dropout_prob": 0.1,
  "auto_map": {
    "AutoModel": "modeling_roberta.XLMRobertaModel"
  },
  "bos_token_id": 0,
  "classifier_dropout": null,
  "eos_token_id": 2,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 514,
  "model_type": "xlm-roberta",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 1,
  "position_embedding_type": "absolute",
  "roberta": 1,
  "torch_dtype": "float32",
  "transformers_version": "4.18.0",
  "type_vocab_size": 1,
  "use_cache": true,
  "vocab_size": 250002
}
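The "auto_map" entry points AutoModel at the custom XLMRobertaModel class in modeling_roberta.py, so the checkpoint has to be loaded with trust_remote_code=True for the custom projection heads to be instantiated. A minimal loading sketch follows; the repo id is a placeholder, and using the stock xlm-roberta-base tokenizer is an assumption, since no tokenizer files are part of this commit:

# Loading sketch. Assumptions: "user/xroberta-use-qa" is a placeholder repo id
# (not given in this commit), and the stock xlm-roberta-base tokenizer matches
# the checkpoint's 250002-token vocabulary.
from transformers import AutoModel, AutoTokenizer

model = AutoModel.from_pretrained("user/xroberta-use-qa", trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")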
    	
modeling_roberta.py ADDED
@@ -0,0 +1,24 @@
import torch

from transformers import XLMRobertaModel as XLMRobertaModelBase


class XLMRobertaModel(XLMRobertaModelBase):
    """XLM-RoBERTa encoder with separate question/answer projection heads."""

    def __init__(self, config):
        super().__init__(config)
        # Project the 768-dim pooled encoder output into a shared 512-dim space.
        self.question_projection = torch.nn.Linear(768, 512)
        self.answer_projection = torch.nn.Linear(768, 512)

    def _embed(self, input_ids, attention_mask, projection):
        # Run the base encoder and take the token-level hidden states.
        outputs = super().__call__(input_ids, attention_mask=attention_mask)
        sequence_output = outputs[0]

        # Mean-pool over tokens, ignoring padding via the attention mask.
        input_mask_expanded = attention_mask.unsqueeze(-1).expand(sequence_output.size()).float()
        embeddings = torch.sum(sequence_output * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
        # Project into the 512-dim space and squash with tanh.
        return torch.tanh(projection(embeddings))

    def question(self, input_ids, attention_mask):
        return self._embed(input_ids, attention_mask, self.question_projection)

    def answer(self, input_ids, attention_mask):
        return self._embed(input_ids, attention_mask, self.answer_projection)
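The class above turns the encoder into a dual encoder: token states are mean-pooled under the attention mask, then passed through a per-side linear head and tanh, giving 512-dim question and answer vectors. A usage sketch follows; the local checkpoint directory, the xlm-roberta-base tokenizer, and the cosine-similarity scoring are illustrative assumptions, not something this commit specifies:

# Usage sketch. Assumptions: the files from this commit live in ./, the stock
# xlm-roberta-base tokenizer is compatible, and cosine similarity is just one
# plausible way to score a question against a candidate answer.
import torch
from transformers import AutoTokenizer

from modeling_roberta import XLMRobertaModel

tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
model = XLMRobertaModel.from_pretrained("./")  # directory with config.json + pytorch_model.bin
model.eval()

q = tokenizer("How tall is Mount Everest?", return_tensors="pt")
a = tokenizer("Mount Everest is 8,849 metres tall.", return_tensors="pt")

with torch.no_grad():
    q_emb = model.question(q["input_ids"], q["attention_mask"])  # shape (1, 512)
    a_emb = model.answer(a["input_ids"], a["attention_mask"])    # shape (1, 512)

score = torch.nn.functional.cosine_similarity(q_emb, a_emb)  # higher = better match
print(score.item())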
    	
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1e3ded1f396d3f20cdb3249faefa44c373f3f59d5adaa19542eb4d20ffc3b908
size 1115392297