Delete folder data/vision4math_clip_model with huggingface_hub
This view is limited to 50 files because the commit contains too many changes.
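The commit title indicates the folder was removed with the huggingface_hub client. A minimal sketch of how such a deletion is done (the repo id and type below are placeholders, not taken from this page):

```python
from huggingface_hub import HfApi

api = HfApi()
# Removes every file under the folder in a single commit.
# "your-username/your-dataset" is a placeholder, not the repo shown here.
api.delete_folder(
    path_in_repo="data/vision4math_clip_model",
    repo_id="your-username/your-dataset",
    repo_type="dataset",
    commit_message="Delete folder data/vision4math_clip_model with huggingface_hub",
)
```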
- data/vision4math_clip_model/clip-vit-large-patch14-336/.gitattributes +0 -28
- data/vision4math_clip_model/clip-vit-large-patch14-336/README.md +0 -50
- data/vision4math_clip_model/clip-vit-large-patch14-336/config.json +0 -179
- data/vision4math_clip_model/clip-vit-large-patch14-336/merges.txt +0 -0
- data/vision4math_clip_model/clip-vit-large-patch14-336/preprocessor_config.json +0 -19
- data/vision4math_clip_model/clip-vit-large-patch14-336/pytorch_model.bin +0 -3
- data/vision4math_clip_model/clip-vit-large-patch14-336/special_tokens_map.json +0 -1
- data/vision4math_clip_model/clip-vit-large-patch14-336/tf_model.h5 +0 -3
- data/vision4math_clip_model/clip-vit-large-patch14-336/tokenizer.json +0 -0
- data/vision4math_clip_model/clip-vit-large-patch14-336/tokenizer_config.json +0 -1
- data/vision4math_clip_model/clip-vit-large-patch14-336/vocab.json +0 -0
- data/vision4math_clip_model/negclip_caption_dvqa_new_sampled_5e-6_epoch3/config.json +0 -31
- data/vision4math_clip_model/negclip_caption_dvqa_new_sampled_5e-6_epoch3/merges.txt +0 -0
- data/vision4math_clip_model/negclip_caption_dvqa_new_sampled_5e-6_epoch3/preprocessor_config.json +0 -28
- data/vision4math_clip_model/negclip_caption_dvqa_new_sampled_5e-6_epoch3/pytorch_model.bin +0 -3
- data/vision4math_clip_model/negclip_caption_dvqa_new_sampled_5e-6_epoch3/special_tokens_map.json +0 -30
- data/vision4math_clip_model/negclip_caption_dvqa_new_sampled_5e-6_epoch3/tokenizer.json +0 -0
- data/vision4math_clip_model/negclip_caption_dvqa_new_sampled_5e-6_epoch3/tokenizer_config.json +0 -31
- data/vision4math_clip_model/negclip_caption_dvqa_new_sampled_5e-6_epoch3/vocab.json +0 -0
- data/vision4math_clip_model/negclip_caption_plotqa_sampled_5e-6_epoch2/config.json +0 -31
- data/vision4math_clip_model/negclip_caption_plotqa_sampled_5e-6_epoch2/merges.txt +0 -0
- data/vision4math_clip_model/negclip_caption_plotqa_sampled_5e-6_epoch2/preprocessor_config.json +0 -28
- data/vision4math_clip_model/negclip_caption_plotqa_sampled_5e-6_epoch2/pytorch_model.bin +0 -3
- data/vision4math_clip_model/negclip_caption_plotqa_sampled_5e-6_epoch2/special_tokens_map.json +0 -30
- data/vision4math_clip_model/negclip_caption_plotqa_sampled_5e-6_epoch2/tokenizer.json +0 -0
- data/vision4math_clip_model/negclip_caption_plotqa_sampled_5e-6_epoch2/tokenizer_config.json +0 -31
- data/vision4math_clip_model/negclip_caption_plotqa_sampled_5e-6_epoch2/vocab.json +0 -0
- data/vision4math_clip_model/negclip_dvqa_1e-5_epoch3/config.json +0 -31
- data/vision4math_clip_model/negclip_dvqa_1e-5_epoch3/merges.txt +0 -0
- data/vision4math_clip_model/negclip_dvqa_1e-5_epoch3/preprocessor_config.json +0 -28
- data/vision4math_clip_model/negclip_dvqa_1e-5_epoch3/pytorch_model.bin +0 -3
- data/vision4math_clip_model/negclip_dvqa_1e-5_epoch3/special_tokens_map.json +0 -30
- data/vision4math_clip_model/negclip_dvqa_1e-5_epoch3/tokenizer.json +0 -0
- data/vision4math_clip_model/negclip_dvqa_1e-5_epoch3/tokenizer_config.json +0 -31
- data/vision4math_clip_model/negclip_dvqa_1e-5_epoch3/vocab.json +0 -0
- data/vision4math_clip_model/negclip_figureqa_1e-5_epoch3/config.json +0 -31
- data/vision4math_clip_model/negclip_figureqa_1e-5_epoch3/merges.txt +0 -0
- data/vision4math_clip_model/negclip_figureqa_1e-5_epoch3/preprocessor_config.json +0 -28
- data/vision4math_clip_model/negclip_figureqa_1e-5_epoch3/pytorch_model.bin +0 -3
- data/vision4math_clip_model/negclip_figureqa_1e-5_epoch3/special_tokens_map.json +0 -30
- data/vision4math_clip_model/negclip_figureqa_1e-5_epoch3/tokenizer.json +0 -0
- data/vision4math_clip_model/negclip_figureqa_1e-5_epoch3/tokenizer_config.json +0 -31
- data/vision4math_clip_model/negclip_figureqa_1e-5_epoch3/vocab.json +0 -0
- data/vision4math_clip_model/negclip_fqa_epoch2/config.json +0 -31
- data/vision4math_clip_model/negclip_fqa_epoch2/merges.txt +0 -0
- data/vision4math_clip_model/negclip_fqa_epoch2/preprocessor_config.json +0 -28
- data/vision4math_clip_model/negclip_fqa_epoch2/pytorch_model.bin +0 -3
- data/vision4math_clip_model/negclip_fqa_epoch2/special_tokens_map.json +0 -30
- data/vision4math_clip_model/negclip_fqa_epoch2/tokenizer.json +0 -0
- data/vision4math_clip_model/negclip_fqa_epoch2/tokenizer_config.json +0 -31
    	
data/vision4math_clip_model/clip-vit-large-patch14-336/.gitattributes
DELETED

@@ -1,28 +0,0 @@
-*.7z filter=lfs diff=lfs merge=lfs -text
-*.arrow filter=lfs diff=lfs merge=lfs -text
-*.bin filter=lfs diff=lfs merge=lfs -text
-*.bin.* filter=lfs diff=lfs merge=lfs -text
-*.bz2 filter=lfs diff=lfs merge=lfs -text
-*.ftz filter=lfs diff=lfs merge=lfs -text
-*.gz filter=lfs diff=lfs merge=lfs -text
-*.h5 filter=lfs diff=lfs merge=lfs -text
-*.joblib filter=lfs diff=lfs merge=lfs -text
-*.lfs.* filter=lfs diff=lfs merge=lfs -text
-*.model filter=lfs diff=lfs merge=lfs -text
-*.msgpack filter=lfs diff=lfs merge=lfs -text
-*.onnx filter=lfs diff=lfs merge=lfs -text
-*.ot filter=lfs diff=lfs merge=lfs -text
-*.parquet filter=lfs diff=lfs merge=lfs -text
-*.pb filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
-*.pth filter=lfs diff=lfs merge=lfs -text
-*.rar filter=lfs diff=lfs merge=lfs -text
-saved_model/**/* filter=lfs diff=lfs merge=lfs -text
-*.tar.* filter=lfs diff=lfs merge=lfs -text
-*.tflite filter=lfs diff=lfs merge=lfs -text
-*.tgz filter=lfs diff=lfs merge=lfs -text
-*.wasm filter=lfs diff=lfs merge=lfs -text
-*.xz filter=lfs diff=lfs merge=lfs -text
-*.zip filter=lfs diff=lfs merge=lfs -text
-*.zstandard filter=lfs diff=lfs merge=lfs -text
-*tfevents* filter=lfs diff=lfs merge=lfs -text
    	
data/vision4math_clip_model/clip-vit-large-patch14-336/README.md
DELETED

@@ -1,50 +0,0 @@
----
-tags:
-- generated_from_keras_callback
-widget:
-- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/cat-dog-music.png
-  candidate_labels: playing music, playing sports
-  example_title: Cat & Dog
-model-index:
-- name: clip-vit-large-patch14-336
-  results: []
----
-
-<!-- This model card has been generated automatically according to the information Keras had access to. You should
-probably proofread and complete it, then remove this comment. -->
-
-# clip-vit-large-patch14-336
-
-This model was trained from scratch on an unknown dataset.
-It achieves the following results on the evaluation set:
-
-
-## Model description
-
-More information needed
-
-## Intended uses & limitations
-
-More information needed
-
-## Training and evaluation data
-
-More information needed
-
-## Training procedure
-
-### Training hyperparameters
-
-The following hyperparameters were used during training:
-- optimizer: None
-- training_precision: float32
-
-### Training results
-
-
-
-### Framework versions
-
-- Transformers 4.21.3
-- TensorFlow 2.8.2
-- Tokenizers 0.12.1
    	
data/vision4math_clip_model/clip-vit-large-patch14-336/config.json
DELETED

@@ -1,179 +0,0 @@
-{
-  "_name_or_path": "openai/clip-vit-large-patch14-336",
-  "architectures": [
-    "CLIPModel"
-  ],
-  "initializer_factor": 1.0,
-  "logit_scale_init_value": 2.6592,
-  "model_type": "clip",
-  "projection_dim": 768,
-  "text_config": {
-    "_name_or_path": "",
-    "add_cross_attention": false,
-    "architectures": null,
-    "attention_dropout": 0.0,
-    "bad_words_ids": null,
-    "bos_token_id": 0,
-    "chunk_size_feed_forward": 0,
-    "cross_attention_hidden_size": null,
-    "decoder_start_token_id": null,
-    "diversity_penalty": 0.0,
-    "do_sample": false,
-    "dropout": 0.0,
-    "early_stopping": false,
-    "encoder_no_repeat_ngram_size": 0,
-    "eos_token_id": 2,
-    "exponential_decay_length_penalty": null,
-    "finetuning_task": null,
-    "forced_bos_token_id": null,
-    "forced_eos_token_id": null,
-    "hidden_act": "quick_gelu",
-    "hidden_size": 768,
-    "id2label": {
-      "0": "LABEL_0",
-      "1": "LABEL_1"
-    },
-    "initializer_factor": 1.0,
-    "initializer_range": 0.02,
-    "intermediate_size": 3072,
-    "is_decoder": false,
-    "is_encoder_decoder": false,
-    "label2id": {
-      "LABEL_0": 0,
-      "LABEL_1": 1
-    },
-    "layer_norm_eps": 1e-05,
-    "length_penalty": 1.0,
-    "max_length": 20,
-    "max_position_embeddings": 77,
-    "min_length": 0,
-    "model_type": "clip_text_model",
-    "no_repeat_ngram_size": 0,
-    "num_attention_heads": 12,
-    "num_beam_groups": 1,
-    "num_beams": 1,
-    "num_hidden_layers": 12,
-    "num_return_sequences": 1,
-    "output_attentions": false,
-    "output_hidden_states": false,
-    "output_scores": false,
-    "pad_token_id": 1,
-    "prefix": null,
-    "problem_type": null,
-    "projection_dim": 768,
-    "pruned_heads": {},
-    "remove_invalid_values": false,
-    "repetition_penalty": 1.0,
-    "return_dict": true,
-    "return_dict_in_generate": false,
-    "sep_token_id": null,
-    "task_specific_params": null,
-    "temperature": 1.0,
-    "tf_legacy_loss": false,
-    "tie_encoder_decoder": false,
-    "tie_word_embeddings": true,
-    "tokenizer_class": null,
-    "top_k": 50,
-    "top_p": 1.0,
-    "torch_dtype": null,
-    "torchscript": false,
-    "transformers_version": "4.21.3",
-    "typical_p": 1.0,
-    "use_bfloat16": false,
-    "vocab_size": 49408
-  },
-  "text_config_dict": {
-    "hidden_size": 768,
-    "intermediate_size": 3072,
-    "num_attention_heads": 12,
-    "num_hidden_layers": 12,
-    "projection_dim": 768
-  },
-  "torch_dtype": "float32",
-  "transformers_version": null,
-  "vision_config": {
-    "_name_or_path": "",
-    "add_cross_attention": false,
-    "architectures": null,
-    "attention_dropout": 0.0,
-    "bad_words_ids": null,
-    "bos_token_id": null,
-    "chunk_size_feed_forward": 0,
-    "cross_attention_hidden_size": null,
-    "decoder_start_token_id": null,
-    "diversity_penalty": 0.0,
-    "do_sample": false,
-    "dropout": 0.0,
-    "early_stopping": false,
-    "encoder_no_repeat_ngram_size": 0,
-    "eos_token_id": null,
-    "exponential_decay_length_penalty": null,
-    "finetuning_task": null,
-    "forced_bos_token_id": null,
-    "forced_eos_token_id": null,
-    "hidden_act": "quick_gelu",
-    "hidden_size": 1024,
-    "id2label": {
-      "0": "LABEL_0",
-      "1": "LABEL_1"
-    },
-    "image_size": 336,
-    "initializer_factor": 1.0,
-    "initializer_range": 0.02,
-    "intermediate_size": 4096,
-    "is_decoder": false,
-    "is_encoder_decoder": false,
-    "label2id": {
-      "LABEL_0": 0,
-      "LABEL_1": 1
-    },
-    "layer_norm_eps": 1e-05,
-    "length_penalty": 1.0,
-    "max_length": 20,
-    "min_length": 0,
-    "model_type": "clip_vision_model",
-    "no_repeat_ngram_size": 0,
-    "num_attention_heads": 16,
-    "num_beam_groups": 1,
-    "num_beams": 1,
-    "num_channels": 3,
-    "num_hidden_layers": 24,
-    "num_return_sequences": 1,
-    "output_attentions": false,
-    "output_hidden_states": false,
-    "output_scores": false,
-    "pad_token_id": null,
-    "patch_size": 14,
-    "prefix": null,
-    "problem_type": null,
-    "projection_dim": 768,
-    "pruned_heads": {},
-    "remove_invalid_values": false,
-    "repetition_penalty": 1.0,
-    "return_dict": true,
-    "return_dict_in_generate": false,
-    "sep_token_id": null,
-    "task_specific_params": null,
-    "temperature": 1.0,
-    "tf_legacy_loss": false,
-    "tie_encoder_decoder": false,
-    "tie_word_embeddings": true,
-    "tokenizer_class": null,
-    "top_k": 50,
-    "top_p": 1.0,
-    "torch_dtype": null,
-    "torchscript": false,
-    "transformers_version": "4.21.3",
-    "typical_p": 1.0,
-    "use_bfloat16": false
-  },
-  "vision_config_dict": {
-    "hidden_size": 1024,
-    "image_size": 336,
-    "intermediate_size": 4096,
-    "num_attention_heads": 16,
-    "num_hidden_layers": 24,
-    "patch_size": 14,
-    "projection_dim": 768
-  }
-}
    	
data/vision4math_clip_model/clip-vit-large-patch14-336/merges.txt
DELETED

The diff for this file is too large to render.
    	
data/vision4math_clip_model/clip-vit-large-patch14-336/preprocessor_config.json
DELETED

@@ -1,19 +0,0 @@
-{
-  "crop_size": 336,
-  "do_center_crop": true,
-  "do_normalize": true,
-  "do_resize": true,
-  "feature_extractor_type": "CLIPFeatureExtractor",
-  "image_mean": [
-    0.48145466,
-    0.4578275,
-    0.40821073
-  ],
-  "image_std": [
-    0.26862954,
-    0.26130258,
-    0.27577711
-  ],
-  "resample": 3,
-  "size": 336
-}
    	
data/vision4math_clip_model/clip-vit-large-patch14-336/pytorch_model.bin
DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:c6032c2e0caae3dc2d4fba35535fa6307dbb49df59c7e182b1bc4b3329b81801
-size 1711974081
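The three lines above are a Git LFS pointer rather than the weights themselves: `oid` is the SHA-256 of the stored file and `size` its length in bytes (about 1.7 GB here). A minimal sketch for verifying a downloaded copy against the pointer, assuming a hypothetical local path:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file so a multi-GB checkpoint never sits in memory at once.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Compare against the oid recorded in the pointer above.
expected = "c6032c2e0caae3dc2d4fba35535fa6307dbb49df59c7e182b1bc4b3329b81801"
assert sha256_of("pytorch_model.bin") == expected  # hypothetical local path
```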
    	
data/vision4math_clip_model/clip-vit-large-patch14-336/special_tokens_map.json
DELETED

@@ -1 +0,0 @@
-{"bos_token": {"content": "<|startoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": "<|endoftext|>"}
    	
data/vision4math_clip_model/clip-vit-large-patch14-336/tf_model.h5
DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d12828ca8f0f3c92194f277b7d893da7f2fb7824d0b99dedb305eb48eb46bb7f
-size 1712454232
    	
data/vision4math_clip_model/clip-vit-large-patch14-336/tokenizer.json
DELETED

The diff for this file is too large to render.
    	
data/vision4math_clip_model/clip-vit-large-patch14-336/tokenizer_config.json
DELETED

@@ -1 +0,0 @@
-{"unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|startoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": "<|endoftext|>", "add_prefix_space": false, "errors": "replace", "do_lower_case": true, "name_or_path": "openai/clip-vit-base-patch32", "model_max_length": 77, "special_tokens_map_file": "/home/suraj/.cache/huggingface/transformers/18a566598f286c9139f88160c99f84eec492a26bd22738fa9cb44d5b7e0a5c76.cce1206abbad28826f000510f22f354e53e66a97f7c23745a7dfe27609cc07f5", "tokenizer_class": "CLIPTokenizer"}
    	
data/vision4math_clip_model/clip-vit-large-patch14-336/vocab.json
DELETED

The diff for this file is too large to render.
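Together, the files above made `data/vision4math_clip_model/clip-vit-large-patch14-336` a complete, self-contained CLIP checkpoint. Before the deletion it could have been loaded straight from the local folder with transformers; a minimal sketch, assuming a local copy of the folder and a hypothetical `chart.png`:

```python
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

# Local path from this diff; the folder no longer exists in the repo.
path = "data/vision4math_clip_model/clip-vit-large-patch14-336"
model = CLIPModel.from_pretrained(path)
processor = CLIPProcessor.from_pretrained(path)

inputs = processor(
    text=["a bar chart", "a line plot"],
    images=Image.open("chart.png"),  # hypothetical example image
    return_tensors="pt",
    padding=True,
)
probs = model(**inputs).logits_per_image.softmax(dim=-1)
```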
    	
data/vision4math_clip_model/negclip_caption_dvqa_new_sampled_5e-6_epoch3/config.json
DELETED

@@ -1,31 +0,0 @@
-{
-  "_name_or_path": "openai/clip-vit-large-patch14-336",
-  "architectures": [
-    "CLIPModel"
-  ],
-  "initializer_factor": 1.0,
-  "logit_scale_init_value": 2.6592,
-  "model_type": "clip",
-  "projection_dim": 768,
-  "text_config": {
-    "dropout": 0.0,
-    "hidden_size": 768,
-    "intermediate_size": 3072,
-    "model_type": "clip_text_model",
-    "num_attention_heads": 12,
-    "projection_dim": 768
-  },
-  "torch_dtype": "float32",
-  "transformers_version": "4.44.2",
-  "vision_config": {
-    "dropout": 0.0,
-    "hidden_size": 1024,
-    "image_size": 336,
-    "intermediate_size": 4096,
-    "model_type": "clip_vision_model",
-    "num_attention_heads": 16,
-    "num_hidden_layers": 24,
-    "patch_size": 14,
-    "projection_dim": 768
-  }
-}
    	
data/vision4math_clip_model/negclip_caption_dvqa_new_sampled_5e-6_epoch3/merges.txt
DELETED

The diff for this file is too large to render.
    	
data/vision4math_clip_model/negclip_caption_dvqa_new_sampled_5e-6_epoch3/preprocessor_config.json
DELETED

@@ -1,28 +0,0 @@
-{
-  "crop_size": {
-    "height": 336,
-    "width": 336
-  },
-  "do_center_crop": true,
-  "do_convert_rgb": true,
-  "do_normalize": true,
-  "do_rescale": true,
-  "do_resize": true,
-  "image_mean": [
-    0.48145466,
-    0.4578275,
-    0.40821073
-  ],
-  "image_processor_type": "CLIPImageProcessor",
-  "image_std": [
-    0.26862954,
-    0.26130258,
-    0.27577711
-  ],
-  "processor_class": "CLIPProcessor",
-  "resample": 3,
-  "rescale_factor": 0.00392156862745098,
-  "size": {
-    "shortest_edge": 336
-  }
-}
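One detail worth noting in the config above: `rescale_factor` is exactly 1/255, so pixels are first scaled from [0, 255] into [0, 1] and only then normalized with `image_mean`/`image_std`. A quick check:

```python
# 0.00392156862745098 is the double-precision value of 1/255.
assert 1 / 255 == 0.00392156862745098
```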
    	
data/vision4math_clip_model/negclip_caption_dvqa_new_sampled_5e-6_epoch3/pytorch_model.bin
DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:4ce4b68c937aef2d68ae325ba29d5b044b45a2fe3efb4c7b45a8b99eb5dc9f4a
-size 1711943042
    	
data/vision4math_clip_model/negclip_caption_dvqa_new_sampled_5e-6_epoch3/special_tokens_map.json
DELETED

@@ -1,30 +0,0 @@
-{
-  "bos_token": {
-    "content": "<|startoftext|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "unk_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  }
-}
    	
data/vision4math_clip_model/negclip_caption_dvqa_new_sampled_5e-6_epoch3/tokenizer.json
DELETED

The diff for this file is too large to render.
    	
data/vision4math_clip_model/negclip_caption_dvqa_new_sampled_5e-6_epoch3/tokenizer_config.json
DELETED

@@ -1,31 +0,0 @@
-{
-  "add_prefix_space": false,
-  "added_tokens_decoder": {
-    "49406": {
-      "content": "<|startoftext|>",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "49407": {
-      "content": "<|endoftext|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    }
-  },
-  "bos_token": "<|startoftext|>",
-  "clean_up_tokenization_spaces": true,
-  "do_lower_case": true,
-  "eos_token": "<|endoftext|>",
-  "errors": "replace",
-  "model_max_length": 77,
-  "pad_token": "<|endoftext|>",
-  "processor_class": "CLIPProcessor",
-  "tokenizer_class": "CLIPTokenizer",
-  "unk_token": "<|endoftext|>"
-}
    	
data/vision4math_clip_model/negclip_caption_dvqa_new_sampled_5e-6_epoch3/vocab.json
DELETED

The diff for this file is too large to render.
    	
data/vision4math_clip_model/negclip_caption_plotqa_sampled_5e-6_epoch2/config.json
DELETED

@@ -1,31 +0,0 @@
-{
-  "_name_or_path": "openai/clip-vit-large-patch14-336",
-  "architectures": [
-    "CLIPModel"
-  ],
-  "initializer_factor": 1.0,
-  "logit_scale_init_value": 2.6592,
-  "model_type": "clip",
-  "projection_dim": 768,
-  "text_config": {
-    "dropout": 0.0,
-    "hidden_size": 768,
-    "intermediate_size": 3072,
-    "model_type": "clip_text_model",
-    "num_attention_heads": 12,
-    "projection_dim": 768
-  },
-  "torch_dtype": "float32",
-  "transformers_version": "4.44.2",
-  "vision_config": {
-    "dropout": 0.0,
-    "hidden_size": 1024,
-    "image_size": 336,
-    "intermediate_size": 4096,
-    "model_type": "clip_vision_model",
-    "num_attention_heads": 16,
-    "num_hidden_layers": 24,
-    "patch_size": 14,
-    "projection_dim": 768
-  }
-}
    	
data/vision4math_clip_model/negclip_caption_plotqa_sampled_5e-6_epoch2/merges.txt
DELETED

The diff for this file is too large to render.
    	
data/vision4math_clip_model/negclip_caption_plotqa_sampled_5e-6_epoch2/preprocessor_config.json
DELETED

@@ -1,28 +0,0 @@
-{
-  "crop_size": {
-    "height": 336,
-    "width": 336
-  },
-  "do_center_crop": true,
-  "do_convert_rgb": true,
-  "do_normalize": true,
-  "do_rescale": true,
-  "do_resize": true,
-  "image_mean": [
-    0.48145466,
-    0.4578275,
-    0.40821073
-  ],
-  "image_processor_type": "CLIPImageProcessor",
-  "image_std": [
-    0.26862954,
-    0.26130258,
-    0.27577711
-  ],
-  "processor_class": "CLIPProcessor",
-  "resample": 3,
-  "rescale_factor": 0.00392156862745098,
-  "size": {
-    "shortest_edge": 336
-  }
-}
    	
data/vision4math_clip_model/negclip_caption_plotqa_sampled_5e-6_epoch2/pytorch_model.bin
DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:48ff58d9fc83caa293fe91287bbccb24c58fa276f07794f6ae6c95540e921372
-size 1711943042
    	
data/vision4math_clip_model/negclip_caption_plotqa_sampled_5e-6_epoch2/special_tokens_map.json
DELETED

@@ -1,30 +0,0 @@
-{
-  "bos_token": {
-    "content": "<|startoftext|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "unk_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  }
-}
    	
data/vision4math_clip_model/negclip_caption_plotqa_sampled_5e-6_epoch2/tokenizer.json
DELETED

The diff for this file is too large to render.
    	
data/vision4math_clip_model/negclip_caption_plotqa_sampled_5e-6_epoch2/tokenizer_config.json
DELETED

@@ -1,31 +0,0 @@
-{
-  "add_prefix_space": false,
-  "added_tokens_decoder": {
-    "49406": {
-      "content": "<|startoftext|>",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "49407": {
-      "content": "<|endoftext|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    }
-  },
-  "bos_token": "<|startoftext|>",
-  "clean_up_tokenization_spaces": true,
-  "do_lower_case": true,
-  "eos_token": "<|endoftext|>",
-  "errors": "replace",
-  "model_max_length": 77,
-  "pad_token": "<|endoftext|>",
-  "processor_class": "CLIPProcessor",
-  "tokenizer_class": "CLIPTokenizer",
-  "unk_token": "<|endoftext|>"
-}
    	
data/vision4math_clip_model/negclip_caption_plotqa_sampled_5e-6_epoch2/vocab.json
DELETED

The diff for this file is too large to render.
    	
data/vision4math_clip_model/negclip_dvqa_1e-5_epoch3/config.json
DELETED

@@ -1,31 +0,0 @@
-{
-  "_name_or_path": "openai/clip-vit-large-patch14-336",
-  "architectures": [
-    "CLIPModel"
-  ],
-  "initializer_factor": 1.0,
-  "logit_scale_init_value": 2.6592,
-  "model_type": "clip",
-  "projection_dim": 768,
-  "text_config": {
-    "dropout": 0.0,
-    "hidden_size": 768,
-    "intermediate_size": 3072,
-    "model_type": "clip_text_model",
-    "num_attention_heads": 12,
-    "projection_dim": 768
-  },
-  "torch_dtype": "float32",
-  "transformers_version": "4.44.2",
-  "vision_config": {
-    "dropout": 0.0,
-    "hidden_size": 1024,
-    "image_size": 336,
-    "intermediate_size": 4096,
-    "model_type": "clip_vision_model",
-    "num_attention_heads": 16,
-    "num_hidden_layers": 24,
-    "patch_size": 14,
-    "projection_dim": 768
-  }
-}
    	
data/vision4math_clip_model/negclip_dvqa_1e-5_epoch3/merges.txt
DELETED

The diff for this file is too large to render.
    	
data/vision4math_clip_model/negclip_dvqa_1e-5_epoch3/preprocessor_config.json
DELETED

@@ -1,28 +0,0 @@
-{
-  "crop_size": {
-    "height": 336,
-    "width": 336
-  },
-  "do_center_crop": true,
-  "do_convert_rgb": true,
-  "do_normalize": true,
-  "do_rescale": true,
-  "do_resize": true,
-  "image_mean": [
-    0.48145466,
-    0.4578275,
-    0.40821073
-  ],
-  "image_processor_type": "CLIPImageProcessor",
-  "image_std": [
-    0.26862954,
-    0.26130258,
-    0.27577711
-  ],
-  "processor_class": "CLIPProcessor",
-  "resample": 3,
-  "rescale_factor": 0.00392156862745098,
-  "size": {
-    "shortest_edge": 336
-  }
-}
    	
data/vision4math_clip_model/negclip_dvqa_1e-5_epoch3/pytorch_model.bin
DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e7b91d58bfa56debcf432b00e1f7672393376c72d69a10903ab669a13ff89e95
-size 1711943042
    	
data/vision4math_clip_model/negclip_dvqa_1e-5_epoch3/special_tokens_map.json DELETED
@@ -1,30 +0,0 @@
-{
-  "bos_token": {
-    "content": "<|startoftext|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "unk_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  }
-}

data/vision4math_clip_model/negclip_dvqa_1e-5_epoch3/tokenizer.json DELETED
The diff for this file is too large to render. See raw diff.

data/vision4math_clip_model/negclip_dvqa_1e-5_epoch3/tokenizer_config.json DELETED
@@ -1,31 +0,0 @@
-{
-  "add_prefix_space": false,
-  "added_tokens_decoder": {
-    "49406": {
-      "content": "<|startoftext|>",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "49407": {
-      "content": "<|endoftext|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    }
-  },
-  "bos_token": "<|startoftext|>",
-  "clean_up_tokenization_spaces": true,
-  "do_lower_case": true,
-  "eos_token": "<|endoftext|>",
-  "errors": "replace",
-  "model_max_length": 77,
-  "pad_token": "<|endoftext|>",
-  "processor_class": "CLIPProcessor",
-  "tokenizer_class": "CLIPTokenizer",
-  "unk_token": "<|endoftext|>"
-}

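Together, special_tokens_map.json, tokenizer_config.json, merges.txt, vocab.json, and tokenizer.json define the stock CLIP BPE tokenizer: ids 49406/49407 for <|startoftext|>/<|endoftext|>, lowercasing, and a 77-token context. A minimal loading sketch, again with an illustrative path to a surviving copy of the deleted folder:

from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained(
    "data/vision4math_clip_model/negclip_dvqa_1e-5_epoch3"  # illustrative
)

enc = tokenizer("a bar chart with three groups",
                padding="max_length", truncation=True)
print(len(enc.input_ids))  # 77, the model_max_length from the config
print(enc.input_ids[0])    # 49406, the <|startoftext|> id
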
data/vision4math_clip_model/negclip_dvqa_1e-5_epoch3/vocab.json DELETED
The diff for this file is too large to render. See raw diff.

data/vision4math_clip_model/negclip_figureqa_1e-5_epoch3/config.json DELETED
@@ -1,31 +0,0 @@
-{
-  "_name_or_path": "openai/clip-vit-large-patch14-336",
-  "architectures": [
-    "CLIPModel"
-  ],
-  "initializer_factor": 1.0,
-  "logit_scale_init_value": 2.6592,
-  "model_type": "clip",
-  "projection_dim": 768,
-  "text_config": {
-    "dropout": 0.0,
-    "hidden_size": 768,
-    "intermediate_size": 3072,
-    "model_type": "clip_text_model",
-    "num_attention_heads": 12,
-    "projection_dim": 768
-  },
-  "torch_dtype": "float32",
-  "transformers_version": "4.44.2",
-  "vision_config": {
-    "dropout": 0.0,
-    "hidden_size": 1024,
-    "image_size": 336,
-    "intermediate_size": 4096,
-    "model_type": "clip_vision_model",
-    "num_attention_heads": 16,
-    "num_hidden_layers": 24,
-    "patch_size": 14,
-    "projection_dim": 768
-  }
-}

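The config above is an unmodified ViT-L/14-336 CLIP architecture (24 vision layers, patch size 14, 336-pixel input, 768-dim shared projection) fine-tuned from openai/clip-vit-large-patch14-336. A minimal loading sketch, assuming a local copy of the deleted folder and an installed transformers/torch:

import torch
from transformers import CLIPModel, CLIPProcessor

path = "data/vision4math_clip_model/negclip_figureqa_1e-5_epoch3"  # illustrative
model = CLIPModel.from_pretrained(path)
processor = CLIPProcessor.from_pretrained(path)

# Both towers project into the same 768-dim space for image-text matching.
with torch.no_grad():
    text = processor(text=["a line plot", "a pie chart"],
                     return_tensors="pt", padding=True)
    emb = model.get_text_features(**text)
print(emb.shape)  # torch.Size([2, 768])
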
data/vision4math_clip_model/negclip_figureqa_1e-5_epoch3/merges.txt DELETED
The diff for this file is too large to render. See raw diff.

data/vision4math_clip_model/negclip_figureqa_1e-5_epoch3/preprocessor_config.json DELETED
@@ -1,28 +0,0 @@
-{
-  "crop_size": {
-    "height": 336,
-    "width": 336
-  },
-  "do_center_crop": true,
-  "do_convert_rgb": true,
-  "do_normalize": true,
-  "do_rescale": true,
-  "do_resize": true,
-  "image_mean": [
-    0.48145466,
-    0.4578275,
-    0.40821073
-  ],
-  "image_processor_type": "CLIPImageProcessor",
-  "image_std": [
-    0.26862954,
-    0.26130258,
-    0.27577711
-  ],
-  "processor_class": "CLIPProcessor",
-  "resample": 3,
-  "rescale_factor": 0.00392156862745098,
-  "size": {
-    "shortest_edge": 336
-  }
-}

data/vision4math_clip_model/negclip_figureqa_1e-5_epoch3/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:54eb8d1ae9d141537d8a70b1fab80faaee1ec1d5739bb8deb0cffb9c1224ab61
-size 1711943042

data/vision4math_clip_model/negclip_figureqa_1e-5_epoch3/special_tokens_map.json DELETED
@@ -1,30 +0,0 @@
-{
-  "bos_token": {
-    "content": "<|startoftext|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "unk_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  }
-}

data/vision4math_clip_model/negclip_figureqa_1e-5_epoch3/tokenizer.json DELETED
The diff for this file is too large to render. See raw diff.

data/vision4math_clip_model/negclip_figureqa_1e-5_epoch3/tokenizer_config.json DELETED
@@ -1,31 +0,0 @@
-{
-  "add_prefix_space": false,
-  "added_tokens_decoder": {
-    "49406": {
-      "content": "<|startoftext|>",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "49407": {
-      "content": "<|endoftext|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    }
-  },
-  "bos_token": "<|startoftext|>",
-  "clean_up_tokenization_spaces": true,
-  "do_lower_case": true,
-  "eos_token": "<|endoftext|>",
-  "errors": "replace",
-  "model_max_length": 77,
-  "pad_token": "<|endoftext|>",
-  "processor_class": "CLIPProcessor",
-  "tokenizer_class": "CLIPTokenizer",
-  "unk_token": "<|endoftext|>"
-}

data/vision4math_clip_model/negclip_figureqa_1e-5_epoch3/vocab.json DELETED
The diff for this file is too large to render. See raw diff.

data/vision4math_clip_model/negclip_fqa_epoch2/config.json DELETED
@@ -1,31 +0,0 @@
-{
-  "_name_or_path": "openai/clip-vit-large-patch14-336",
-  "architectures": [
-    "CLIPModel"
-  ],
-  "initializer_factor": 1.0,
-  "logit_scale_init_value": 2.6592,
-  "model_type": "clip",
-  "projection_dim": 768,
-  "text_config": {
-    "dropout": 0.0,
-    "hidden_size": 768,
-    "intermediate_size": 3072,
-    "model_type": "clip_text_model",
-    "num_attention_heads": 12,
-    "projection_dim": 768
-  },
-  "torch_dtype": "float32",
-  "transformers_version": "4.44.2",
-  "vision_config": {
-    "dropout": 0.0,
-    "hidden_size": 1024,
-    "image_size": 336,
-    "intermediate_size": 4096,
-    "model_type": "clip_vision_model",
-    "num_attention_heads": 16,
-    "num_hidden_layers": 24,
-    "patch_size": 14,
-    "projection_dim": 768
-  }
-}

data/vision4math_clip_model/negclip_fqa_epoch2/merges.txt DELETED
The diff for this file is too large to render. See raw diff.

data/vision4math_clip_model/negclip_fqa_epoch2/preprocessor_config.json DELETED
@@ -1,28 +0,0 @@
-{
-  "crop_size": {
-    "height": 336,
-    "width": 336
-  },
-  "do_center_crop": true,
-  "do_convert_rgb": true,
-  "do_normalize": true,
-  "do_rescale": true,
-  "do_resize": true,
-  "image_mean": [
-    0.48145466,
-    0.4578275,
-    0.40821073
-  ],
-  "image_processor_type": "CLIPImageProcessor",
-  "image_std": [
-    0.26862954,
-    0.26130258,
-    0.27577711
-  ],
-  "processor_class": "CLIPProcessor",
-  "resample": 3,
-  "rescale_factor": 0.00392156862745098,
-  "size": {
-    "shortest_edge": 336
-  }
-}

data/vision4math_clip_model/negclip_fqa_epoch2/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:94324612864f3b150fe8b3d0472b3d100eef6bf41c3a327074022f623c6a80ea
-size 1711943042

data/vision4math_clip_model/negclip_fqa_epoch2/special_tokens_map.json DELETED
@@ -1,30 +0,0 @@
-{
-  "bos_token": {
-    "content": "<|startoftext|>",
-    "lstrip": false,
-    "normalized": true,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "unk_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  }
-}

data/vision4math_clip_model/negclip_fqa_epoch2/tokenizer.json DELETED
The diff for this file is too large to render. See raw diff.

data/vision4math_clip_model/negclip_fqa_epoch2/tokenizer_config.json DELETED
@@ -1,31 +0,0 @@
-{
-  "add_prefix_space": false,
-  "added_tokens_decoder": {
-    "49406": {
-      "content": "<|startoftext|>",
-      "lstrip": false,
-      "normalized": true,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "49407": {
-      "content": "<|endoftext|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    }
-  },
-  "bos_token": "<|startoftext|>",
-  "clean_up_tokenization_spaces": true,
-  "do_lower_case": true,
-  "eos_token": "<|endoftext|>",
-  "errors": "replace",
-  "model_max_length": 77,
-  "pad_token": "<|endoftext|>",
-  "processor_class": "CLIPProcessor",
-  "tokenizer_class": "CLIPTokenizer",
-  "unk_token": "<|endoftext|>"
-}