Dataset columns: model_id (string, 7–105 characters), model_card (string, 1–130k characters), model_labels (list, 2–80k items)
jayanta/resnet50-finetuned-memes
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # resnet50-finetuned-memes This model is a fine-tuned version of [microsoft/resnet-50](https://huggingface.co/microsoft/resnet-50) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 1.0625 - Accuracy: 0.5742 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.00012 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.4795 | 0.99 | 40 | 1.4641 | 0.4382 | | 1.3455 | 1.99 | 80 | 1.3281 | 0.4389 | | 1.262 | 2.99 | 120 | 1.2583 | 0.4583 | | 1.1975 | 3.99 | 160 | 1.1978 | 0.4876 | | 1.1358 | 4.99 | 200 | 1.1614 | 0.5139 | | 1.1273 | 5.99 | 240 | 1.1316 | 0.5379 | | 1.0379 | 6.99 | 280 | 1.1024 | 0.5464 | | 1.041 | 7.99 | 320 | 1.0927 | 0.5580 | | 0.9952 | 8.99 | 360 | 1.0790 | 0.5541 | | 1.0146 | 9.99 | 400 | 1.0625 | 0.5742 | ### Framework versions - Transformers 4.22.1 - Pytorch 1.12.1+cu113 - Datasets 2.4.0 - Tokenizers 0.12.1
[ "bollywood memes", "industrialist", "political memes", "singer memes", "sports memes" ]
edub0420/autotrain-graphwerk-1472254089
# Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 1472254089 - CO2 Emissions (in grams): 0.0038 ## Validation Metrics - Loss: 0.005 - Accuracy: 1.000 - Precision: 1.000 - Recall: 1.000 - AUC: 1.000 - F1: 1.000
[ "buy", "sell" ]
edub0420/autotrain-graphwerk-1472254090
# Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 1472254090 - CO2 Emissions (in grams): 0.8960 ## Validation Metrics - Loss: 0.004 - Accuracy: 1.000 - Precision: 1.000 - Recall: 1.000 - AUC: 1.000 - F1: 1.000
[ "buy", "sell" ]
jayanta/aaraki-vit-base-patch16-224-in21k-finetuned-cifar10
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # aaraki-vit-base-patch16-224-in21k-finetuned-cifar10 This model is a fine-tuned version of [aaraki/vit-base-patch16-224-in21k-finetuned-cifar10](https://huggingface.co/aaraki/vit-base-patch16-224-in21k-finetuned-cifar10) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.4137 - Accuracy: 0.8524 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.00012 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.9727 | 0.99 | 40 | 0.8400 | 0.7334 | | 0.5305 | 1.99 | 80 | 0.5147 | 0.8284 | | 0.3124 | 2.99 | 120 | 0.4698 | 0.8145 | | 0.2263 | 3.99 | 160 | 0.3892 | 0.8563 | | 0.1453 | 4.99 | 200 | 0.3874 | 0.8570 | | 0.1255 | 5.99 | 240 | 0.4097 | 0.8470 | | 0.0989 | 6.99 | 280 | 0.3860 | 0.8570 | | 0.0755 | 7.99 | 320 | 0.4141 | 0.8539 | | 0.08 | 8.99 | 360 | 0.4049 | 0.8594 | | 0.0639 | 9.99 | 400 | 0.4137 | 0.8524 | ### Framework versions - Transformers 4.22.1 - Pytorch 1.12.1+cu113 - Datasets 2.4.0 - Tokenizers 0.12.1
[ "bollywood memes", "industrialist", "political memes", "singer memes", "sports memes" ]
jayanta/swin-base-patch4-window7-224-20epochs-finetuned-memes
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-base-patch4-window7-224-20epochs-finetuned-memes This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.7090 - Accuracy: 0.8478 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.00012 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.0238 | 0.99 | 40 | 0.9636 | 0.6445 | | 0.777 | 1.99 | 80 | 0.6591 | 0.7666 | | 0.4763 | 2.99 | 120 | 0.5381 | 0.8130 | | 0.3215 | 3.99 | 160 | 0.5244 | 0.8253 | | 0.2179 | 4.99 | 200 | 0.5123 | 0.8238 | | 0.1868 | 5.99 | 240 | 0.5052 | 0.8308 | | 0.154 | 6.99 | 280 | 0.5444 | 0.8338 | | 0.1166 | 7.99 | 320 | 0.6318 | 0.8238 | | 0.1099 | 8.99 | 360 | 0.5656 | 0.8338 | | 0.0925 | 9.99 | 400 | 0.6057 | 0.8338 | | 0.0779 | 10.99 | 440 | 0.5942 | 0.8393 | | 0.0629 | 11.99 | 480 | 0.6112 | 0.8400 | | 0.0742 | 12.99 | 520 | 0.6588 | 0.8331 | | 0.0752 | 13.99 | 560 | 0.6143 | 0.8408 | | 0.0577 | 14.99 | 600 | 0.6450 | 0.8516 | | 0.0589 | 15.99 | 640 | 0.6787 | 0.8400 | | 0.0555 | 16.99 | 680 | 0.6641 | 0.8454 | | 0.052 | 17.99 | 720 | 0.7213 | 0.8524 | | 0.0589 | 18.99 | 760 | 0.6917 | 0.8470 | | 0.0506 | 19.99 | 800 | 0.7090 | 0.8478 | ### Framework versions - Transformers 4.22.1 - Pytorch 1.12.1+cu113 - Datasets 2.4.0 - Tokenizers 0.12.1
[ "bollywood memes", "industrialist", "political memes", "singer memes", "sports memes" ]
roupenminassian/swin-tiny-patch4-window7-224-finetuned-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-eurosat This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.6712 - Accuracy: 0.5872 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.6811 | 1.0 | 21 | 0.6773 | 0.5604 | | 0.667 | 2.0 | 42 | 0.6743 | 0.5805 | | 0.6521 | 3.0 | 63 | 0.6712 | 0.5872 | ### Framework versions - Transformers 4.22.1 - Pytorch 1.12.1+cu113 - Datasets 2.4.0 - Tokenizers 0.12.1
[ "ce", "laa" ]
surya07/swin-tiny-patch4-window7-224-finetuned-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-eurosat This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.4066 - Accuracy: 0.875 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 0.57 | 1 | 0.7569 | 0.5417 | | No log | 1.57 | 2 | 0.5000 | 0.8333 | | No log | 2.57 | 3 | 0.4066 | 0.875 | ### Framework versions - Transformers 4.22.1 - Pytorch 1.12.1+cu113 - Datasets 2.4.0 - Tokenizers 0.12.1
[ "bursted_polyp", "polyps" ]
tianchez/autotrain-line_clip_no_nut_boltline_clip_no_nut_bolt-1523955096
# Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 1523955096 - CO2 Emissions (in grams): 10.4234 ## Validation Metrics - Loss: 0.580 - Accuracy: 0.798 - Macro F1: 0.542 - Micro F1: 0.798 - Weighted F1: 0.796 - Macro Precision: 0.548 - Micro Precision: 0.798 - Weighted Precision: 0.796 - Macro Recall: 0.537 - Micro Recall: 0.798 - Weighted Recall: 0.798
[ "double_flat_buckle", "double_loose", "unknown", "double_under_buckle", "normal", "pin_installation_irregular", "pin_missing", "pin_prolapse", "single_flat_buckle", "single_loose", "single_under_buckle" ]
omarques/autotrain-dogs-and-cats-1527055142
# Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 1527055142 - CO2 Emissions (in grams): 0.8187 ## Validation Metrics - Loss: 0.068 - Accuracy: 1.000 - Precision: 1.000 - Recall: 1.000 - AUC: 1.000 - F1: 1.000
[ "cat", "dog" ]
omarques/autotrain-test-dogs-cats-1527155150
# Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 1527155150 - CO2 Emissions (in grams): 0.7874 ## Validation Metrics - Loss: 0.043 - Accuracy: 1.000 - Precision: 1.000 - Recall: 1.000 - AUC: 1.000 - F1: 1.000
[ "cat", "dog" ]
ImageIN/resnet-50_finetuned
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # resnet-50_finetuned This model is a fine-tuned version of [microsoft/resnet-50](https://huggingface.co/microsoft/resnet-50) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.7209 - Precision: 0.3702 - Recall: 0.5 - F1: 0.4254 - Accuracy: 0.7404 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 46 | 0.6599 | 0.3702 | 0.5 | 0.4254 | 0.7404 | | No log | 2.0 | 92 | 0.6725 | 0.3702 | 0.5 | 0.4254 | 0.7404 | | No log | 3.0 | 138 | nan | 0.8714 | 0.5062 | 0.4384 | 0.7436 | | No log | 4.0 | 184 | nan | 0.8714 | 0.5062 | 0.4384 | 0.7436 | | No log | 5.0 | 230 | nan | 0.8714 | 0.5062 | 0.4384 | 0.7436 | | No log | 6.0 | 276 | nan | 0.8714 | 0.5062 | 0.4384 | 0.7436 | | No log | 7.0 | 322 | nan | 0.8714 | 0.5062 | 0.4384 | 0.7436 | | No log | 8.0 | 368 | nan | 0.8714 | 0.5062 | 0.4384 | 0.7436 | | No log | 9.0 | 414 | nan | 0.8714 | 0.5062 | 0.4384 | 0.7436 | | No log | 10.0 | 460 | 0.7209 | 0.3702 | 0.5 | 0.4254 | 0.7404 | ### Framework versions - Transformers 4.22.1 - Pytorch 1.12.1+cu113 - Datasets 2.5.1 - Tokenizers 0.12.1
[ "illustrated", "not-illustrated" ]
ImageIN/convnext-tiny-224_finetuned
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # convnext-tiny-224_finetuned This model is a fine-tuned version of [facebook/convnext-tiny-224](https://huggingface.co/facebook/convnext-tiny-224) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0895 - Precision: 0.9807 - Recall: 0.9608 - F1: 0.9702 - Accuracy: 0.9776 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 46 | 0.3080 | 0.9096 | 0.6852 | 0.7206 | 0.8365 | | No log | 2.0 | 92 | 0.1644 | 0.9660 | 0.9176 | 0.9386 | 0.9551 | | No log | 3.0 | 138 | 0.0974 | 0.9742 | 0.9586 | 0.9661 | 0.9744 | | No log | 4.0 | 184 | 0.0795 | 0.9829 | 0.9670 | 0.9746 | 0.9808 | | No log | 5.0 | 230 | 0.0838 | 0.9807 | 0.9608 | 0.9702 | 0.9776 | | No log | 6.0 | 276 | 0.0838 | 0.9807 | 0.9608 | 0.9702 | 0.9776 | | No log | 7.0 | 322 | 0.0803 | 0.9829 | 0.9670 | 0.9746 | 0.9808 | | No log | 8.0 | 368 | 0.0869 | 0.9807 | 0.9608 | 0.9702 | 0.9776 | | No log | 9.0 | 414 | 0.0897 | 0.9807 | 0.9608 | 0.9702 | 0.9776 | | No log | 10.0 | 460 | 0.0895 | 0.9807 | 0.9608 | 0.9702 | 0.9776 | ### Framework versions - Transformers 4.22.1 - Pytorch 1.12.1+cu113 - Datasets 2.5.1 - Tokenizers 0.12.1
[ "illustrated", "not-illustrated" ]
ChristosSevastopoulos/swin-tiny-patch4-window7-224-thecbbbfs
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-thecbbbfs This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.3088 - Accuracy: 0.8933 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.5717 | 0.96 | 12 | 0.3088 | 0.8933 | ### Framework versions - Transformers 4.23.0.dev0 - Pytorch 1.12.1+cu102 - Datasets 2.5.1 - Tokenizers 0.13.0
[ "negative_set_her_3", "positive_set_her_3" ]
nateraw/convnext-tiny-224-finetuned-eurosat-albumentations
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # convnext-tiny-224-finetuned-eurosat-albumentations This model is a fine-tuned version of [facebook/convnext-tiny-224](https://huggingface.co/facebook/convnext-tiny-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.0608 - Accuracy: 0.9815 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1449 | 1.0 | 190 | 0.1327 | 0.9685 | | 0.0766 | 2.0 | 380 | 0.0762 | 0.9774 | | 0.0493 | 3.0 | 570 | 0.0608 | 0.9815 | ### Framework versions - Transformers 4.22.1 - Pytorch 1.12.1+cu113 - Datasets 2.5.1 - Tokenizers 0.12.1
[ "annualcrop", "forest", "herbaceousvegetation", "highway", "industrial", "pasture", "permanentcrop", "residential", "river", "sealake" ]
erikejw/swinv2-tiny-patch4-window8-256-finetuned-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swinv2-tiny-patch4-window8-256-finetuned-eurosat This model is a fine-tuned version of [microsoft/swinv2-tiny-patch4-window8-256](https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.0510 - Accuracy: 0.9826 - F1: 0.9826 - Precision: 0.9828 - Recall: 0.9826 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.2 - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision | Recall | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:---------:|:------:| | 0.4479 | 1.0 | 95 | 0.1592 | 0.9478 | 0.9478 | 0.9500 | 0.9478 | | 0.3078 | 2.0 | 190 | 0.0914 | 0.9685 | 0.9686 | 0.9695 | 0.9685 | | 0.2307 | 3.0 | 285 | 0.0603 | 0.9785 | 0.9785 | 0.9790 | 0.9785 | | 0.227 | 4.0 | 380 | 0.0531 | 0.9811 | 0.9811 | 0.9814 | 0.9811 | | 0.1674 | 5.0 | 475 | 0.0510 | 0.9826 | 0.9826 | 0.9828 | 0.9826 | ### Framework versions - Transformers 4.22.1 - Pytorch 1.12.1+cu113 - Datasets 2.5.1 - Tokenizers 0.12.1
[ "annualcrop", "forest", "herbaceousvegetation", "highway", "industrial", "pasture", "permanentcrop", "residential", "river", "sealake" ]
erikejw/swinv2-small-patch4-window16-256-finetuned-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swinv2-small-patch4-window16-256-finetuned-eurosat This model is a fine-tuned version of [microsoft/swinv2-small-patch4-window16-256](https://huggingface.co/microsoft/swinv2-small-patch4-window16-256) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.0328 - Accuracy: 0.9893 - F1: 0.9893 - Precision: 0.9893 - Recall: 0.9893 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 96 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.2 - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision | Recall | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:---------:|:------:| | 0.2326 | 1.0 | 253 | 0.0870 | 0.9715 | 0.9716 | 0.9720 | 0.9715 | | 0.1955 | 2.0 | 506 | 0.0576 | 0.9789 | 0.9788 | 0.9794 | 0.9789 | | 0.1229 | 3.0 | 759 | 0.0450 | 0.9837 | 0.9837 | 0.9839 | 0.9837 | | 0.0797 | 4.0 | 1012 | 0.0332 | 0.9889 | 0.9889 | 0.9889 | 0.9889 | | 0.0826 | 5.0 | 1265 | 0.0328 | 0.9893 | 0.9893 | 0.9893 | 0.9893 | ### Framework versions - Transformers 4.22.1 - Pytorch 1.12.1+cu113 - Datasets 2.5.1 - Tokenizers 0.12.1
[ "annualcrop", "forest", "herbaceousvegetation", "highway", "industrial", "pasture", "permanentcrop", "residential", "river", "sealake" ]
glopez/cifar-10
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # cifar-10 This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the cifar10 dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.22.1 - Pytorch 1.12.1+cu113 - Datasets 2.5.1 - Tokenizers 0.12.1
[ "airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck" ]
ImageIN/convnext-base-224_finetuned_on_ImageIn_annotations
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # convnext-base-224_finetuned_on_ImageIn_annotations This model is a fine-tuned version of [facebook/convnext-base-224](https://huggingface.co/facebook/convnext-base-224) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0749 - Precision: 0.9722 - Recall: 0.9811 - F1: 0.9765 - Accuracy: 0.9824 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 83 | 0.1368 | 0.9748 | 0.9632 | 0.9688 | 0.9772 | | No log | 2.0 | 166 | 0.0734 | 0.9750 | 0.9727 | 0.9739 | 0.9807 | | No log | 3.0 | 249 | 0.0693 | 0.9750 | 0.9727 | 0.9739 | 0.9807 | | No log | 4.0 | 332 | 0.0698 | 0.9750 | 0.9727 | 0.9739 | 0.9807 | | No log | 5.0 | 415 | 0.0688 | 0.9750 | 0.9727 | 0.9739 | 0.9807 | | No log | 6.0 | 498 | 0.0690 | 0.9729 | 0.9751 | 0.9740 | 0.9807 | | 0.0947 | 7.0 | 581 | 0.0666 | 0.9689 | 0.9800 | 0.9743 | 0.9807 | | 0.0947 | 8.0 | 664 | 0.0642 | 0.9689 | 0.9800 | 0.9743 | 0.9807 | | 0.0947 | 9.0 | 747 | 0.0790 | 0.9763 | 0.9763 | 0.9763 | 0.9824 | | 0.0947 | 10.0 | 830 | 0.0813 | 0.9750 | 0.9727 | 0.9739 | 0.9807 | | 0.0947 | 11.0 | 913 | 0.0797 | 0.9750 | 0.9727 | 0.9739 | 0.9807 | | 0.0947 | 12.0 | 996 | 0.0791 | 0.9763 | 0.9763 | 0.9763 | 0.9824 | | 0.0205 | 13.0 | 1079 | 0.0871 | 0.9750 | 0.9727 | 0.9739 | 0.9807 | | 0.0205 | 14.0 | 1162 | 0.0716 | 0.9722 | 0.9811 | 0.9765 | 0.9824 | | 0.0205 | 15.0 | 1245 | 0.0746 | 0.9776 | 0.9799 | 0.9787 | 0.9842 | | 0.0205 | 16.0 | 1328 | 0.0917 | 0.9738 | 0.9692 | 0.9714 | 0.9789 | | 0.0205 | 17.0 | 1411 | 0.0694 | 0.9776 | 0.9799 | 0.9787 | 0.9842 | | 0.0205 | 18.0 | 1494 | 0.0697 | 0.9768 | 0.9859 | 0.9812 | 0.9859 | | 0.0166 | 19.0 | 1577 | 0.0689 | 0.9702 | 0.9835 | 0.9766 | 0.9824 | | 0.0166 | 20.0 | 1660 | 0.0995 | 0.9738 | 0.9692 | 0.9714 | 0.9789 | | 0.0166 | 21.0 | 1743 | 0.0847 | 0.9776 | 0.9799 | 0.9787 | 0.9842 | | 0.0166 | 22.0 | 1826 | 0.0843 | 0.9776 | 0.9799 | 0.9787 | 0.9842 | | 0.0166 | 23.0 | 1909 | 0.0869 | 0.9750 | 0.9727 | 0.9739 | 0.9807 | | 0.0166 | 24.0 | 1992 | 0.0762 | 0.9789 | 0.9835 | 0.9811 | 0.9859 | | 0.0125 | 25.0 | 2075 | 0.0778 | 0.9789 | 0.9835 | 0.9811 | 0.9859 | | 0.0125 | 26.0 | 2158 | 0.0834 | 0.9763 | 0.9763 | 0.9763 | 0.9824 | | 0.0125 | 27.0 | 2241 | 0.0818 | 0.9776 | 0.9799 | 0.9787 | 0.9842 | | 0.0125 | 28.0 | 2324 | 0.0756 | 0.9684 | 0.9859 | 0.9768 | 0.9824 | | 0.0125 | 29.0 | 2407 | 0.1150 | 0.9591 | 0.9824 | 0.9700 | 0.9772 | | 0.0125 | 30.0 | 2490 | 0.0781 | 0.9748 | 0.9883 | 0.9813 | 0.9859 | | 0.0111 | 31.0 | 2573 | 0.0793 | 0.9716 | 0.9871 | 0.9790 | 0.9842 | | 0.0111 | 32.0 | 2656 | 0.0713 | 0.9748 | 0.9883 | 0.9813 | 0.9859 | | 0.0111 | 33.0 | 2739 | 0.0802 | 0.9748 | 0.9883 | 0.9813 | 0.9859 | | 0.0111 | 34.0 | 2822 | 0.0636 | 0.9802 | 0.9870 | 0.9835 | 0.9877 | | 0.0111 | 35.0 | 2905 | 0.0702 | 0.9789 | 0.9835 | 0.9811 | 0.9859 | | 0.0111 | 36.0 | 2988 | 0.0773 | 0.9748 | 0.9883 | 0.9813 | 0.9859 | | 0.0145 | 37.0 | 3071 | 0.0663 | 0.9781 | 0.9894 | 0.9836 | 0.9877 | | 0.0145 | 38.0 | 3154 | 0.0721 | 0.9789 | 0.9835 | 0.9811 | 0.9859 | | 0.0145 | 39.0 | 3237 | 0.0708 | 0.9789 | 0.9835 | 0.9811 | 0.9859 | | 0.0145 | 40.0 | 3320 | 0.0729 | 0.9748 | 0.9883 | 0.9813 | 0.9859 | | 0.0145 | 41.0 | 3403 | 0.0760 | 0.9748 | 0.9883 | 0.9813 | 0.9859 | | 0.0145 | 42.0 | 3486 | 0.0771 | 0.9716 | 0.9871 | 0.9790 | 0.9842 | | 0.0106 | 43.0 | 3569 | 0.0713 | 0.9748 | 0.9883 | 0.9813 | 0.9859 | | 0.0106 | 44.0 | 3652 | 0.0721 | 0.9748 | 0.9883 | 0.9813 | 0.9859 | | 0.0106 | 45.0 | 3735 | 0.0732 | 0.9768 | 0.9859 | 0.9812 | 0.9859 | | 0.0106 | 46.0 | 3818 | 0.0783 | 0.9789 | 0.9835 | 0.9811 | 0.9859 | | 0.0106 | 47.0 | 3901 | 0.0770 | 0.9789 | 0.9835 | 0.9811 | 0.9859 | | 0.0106 | 48.0 | 3984 | 0.0744 | 0.9735 | 0.9847 | 0.9789 | 0.9842 | | 0.0082 | 49.0 | 4067 | 0.0752 | 0.9722 | 0.9811 | 0.9765 | 0.9824 | | 0.0082 | 50.0 | 4150 | 0.0749 | 0.9722 | 0.9811 | 0.9765 | 0.9824 | ### Framework versions - Transformers 4.22.1 - Pytorch 1.12.1+cu113 - Datasets 2.5.1 - Tokenizers 0.12.1
[ "not-illustrated", "illustrated" ]
siddharth963/vit-base-patch16-224-in21k-finetuned-cassava
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-patch16-224-in21k-finetuned-cassava This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the image_folder dataset. It achieves the following results on the evaluation set: - Loss: 0.3742 - Accuracy: 0.8706 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.5628 | 1.0 | 150 | 0.5357 | 0.8308 | | 0.4398 | 2.0 | 300 | 0.4311 | 0.8598 | | 0.4022 | 3.0 | 450 | 0.3958 | 0.8668 | | 0.3855 | 4.0 | 600 | 0.4030 | 0.8598 | | 0.3659 | 5.0 | 750 | 0.4125 | 0.8617 | | 0.3393 | 6.0 | 900 | 0.3840 | 0.8673 | | 0.3022 | 7.0 | 1050 | 0.3775 | 0.8673 | | 0.2941 | 8.0 | 1200 | 0.3742 | 0.8706 | | 0.2903 | 9.0 | 1350 | 0.3809 | 0.8696 | | 0.2584 | 10.0 | 1500 | 0.3756 | 0.8696 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
[ "cbb", "cbsd", "cgm", "cmd", "h" ]
Alex-VisTas/swin-tiny-patch4-window7-224-finetuned-woody
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-woody This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.4349 - Accuracy: 0.7927 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 30 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.632 | 1.0 | 58 | 0.5883 | 0.6836 | | 0.6067 | 2.0 | 116 | 0.6017 | 0.6848 | | 0.5865 | 3.0 | 174 | 0.5695 | 0.7042 | | 0.553 | 4.0 | 232 | 0.5185 | 0.7515 | | 0.5468 | 5.0 | 290 | 0.5108 | 0.7430 | | 0.5473 | 6.0 | 348 | 0.4882 | 0.7648 | | 0.5381 | 7.0 | 406 | 0.4800 | 0.7588 | | 0.5468 | 8.0 | 464 | 0.5056 | 0.7358 | | 0.5191 | 9.0 | 522 | 0.4784 | 0.7673 | | 0.5318 | 10.0 | 580 | 0.4762 | 0.7636 | | 0.5079 | 11.0 | 638 | 0.4859 | 0.7673 | | 0.5216 | 12.0 | 696 | 0.4691 | 0.7697 | | 0.515 | 13.0 | 754 | 0.4857 | 0.7624 | | 0.5186 | 14.0 | 812 | 0.4685 | 0.7733 | | 0.4748 | 15.0 | 870 | 0.4536 | 0.7818 | | 0.4853 | 16.0 | 928 | 0.4617 | 0.7770 | | 0.4868 | 17.0 | 986 | 0.4622 | 0.7782 | | 0.4572 | 18.0 | 1044 | 0.4583 | 0.7770 | | 0.4679 | 19.0 | 1102 | 0.4590 | 0.7733 | | 0.4508 | 20.0 | 1160 | 0.4576 | 0.7903 | | 0.4663 | 21.0 | 1218 | 0.4542 | 0.7891 | | 0.4533 | 22.0 | 1276 | 0.4428 | 0.7903 | | 0.4892 | 23.0 | 1334 | 0.4372 | 0.7867 | | 0.4704 | 24.0 | 1392 | 0.4414 | 0.7903 | | 0.4304 | 25.0 | 1450 | 0.4430 | 0.7988 | | 0.4411 | 26.0 | 1508 | 0.4348 | 0.7818 | | 0.4604 | 27.0 | 1566 | 0.4387 | 0.7927 | | 0.441 | 28.0 | 1624 | 0.4378 | 0.7964 | | 0.442 | 29.0 | 1682 | 0.4351 | 0.7915 | | 0.4585 | 30.0 | 1740 | 0.4349 | 0.7927 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.0 - Tokenizers 0.13.1
[ "normal", "woody" ]
DrishtiSharma/finetuned-ConvNext-Indian-food
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuned-ConvNext-Indian-food This model is a fine-tuned version of [facebook/convnext-tiny-224](https://huggingface.co/facebook/convnext-tiny-224) on the indian_food_images dataset. It achieves the following results on the evaluation set: - Loss: 0.2977 - Accuracy: 0.9107 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.3145 | 0.3 | 100 | 1.0460 | 0.8151 | | 0.6694 | 0.6 | 200 | 0.5439 | 0.8757 | | 0.5057 | 0.9 | 300 | 0.4398 | 0.8831 | | 0.4381 | 1.2 | 400 | 0.4286 | 0.8820 | | 0.4376 | 1.5 | 500 | 0.3400 | 0.9044 | | 0.2499 | 1.8 | 600 | 0.3312 | 0.9065 | | 0.2802 | 2.1 | 700 | 0.3338 | 0.9033 | | 0.3014 | 2.4 | 800 | 0.3572 | 0.8948 | | 0.2508 | 2.7 | 900 | 0.3432 | 0.9022 | | 0.2012 | 3.0 | 1000 | 0.3060 | 0.9086 | | 0.2634 | 3.3 | 1100 | 0.3451 | 0.9086 | | 0.2483 | 3.6 | 1200 | 0.3550 | 0.9044 | | 0.2273 | 3.9 | 1300 | 0.2977 | 0.9107 | | 0.1214 | 4.2 | 1400 | 0.3265 | 0.9160 | | 0.2048 | 4.5 | 1500 | 0.3126 | 0.9214 | | 0.0997 | 4.8 | 1600 | 0.3164 | 0.9160 | | 0.1145 | 5.11 | 1700 | 0.3055 | 0.9139 | | 0.1578 | 5.41 | 1800 | 0.3195 | 0.9171 | | 0.0615 | 5.71 | 1900 | 0.3401 | 0.9107 | | 0.1537 | 6.01 | 2000 | 0.3428 | 0.9097 | | 0.1278 | 6.31 | 2100 | 0.3058 | 0.9192 | | 0.1274 | 6.61 | 2200 | 0.3189 | 0.9192 | | 0.0877 | 6.91 | 2300 | 0.3370 | 0.9182 | | 0.1058 | 7.21 | 2400 | 0.3225 | 0.9192 | | 0.1742 | 7.51 | 2500 | 0.3341 | 0.9214 | | 0.0949 | 7.81 | 2600 | 0.3126 | 0.9256 | | 0.1732 | 8.11 | 2700 | 0.3078 | 0.9235 | | 0.0894 | 8.41 | 2800 | 0.3098 | 0.9267 | | 0.1257 | 8.71 | 2900 | 0.3030 | 0.9320 | | 0.1747 | 9.01 | 3000 | 0.3106 | 0.9256 | | 0.2119 | 9.31 | 3100 | 0.3037 | 0.9299 | | 0.1074 | 9.61 | 3200 | 0.3049 | 0.9277 | | 0.1275 | 9.91 | 3300 | 0.3046 | 0.9309 | ### Framework versions - Transformers 4.22.2 - Pytorch 1.12.1+cu113 - Datasets 2.5.1 - Tokenizers 0.12.1
[ "burger", "butter_naan", "kaathi_rolls", "kadai_paneer", "kulfi", "masala_dosa", "momos", "paani_puri", "pakode", "pav_bhaji", "pizza", "samosa", "chai", "chapati", "chole_bhature", "dal_makhani", "dhokla", "fried_rice", "idli", "jalebi" ]
carbon225/vit-base-patch16-224-hentai
# ViT for NSFW classification ## Model info This is Google's [vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) fine-tuned to flag images according to the [vndb.org](https://vndb.org/d19) image flagging guidelines, with 3 classes: - safe - suggestive - explicit ## Training data The model was trained on the vndb.org [database dump](https://vndb.org/d14) using full-size screenshots (`sf` in the database dump). The dataset can be loaded from [carbon225/vndb_img](https://huggingface.co/datasets/carbon225/vndb_img). ## Intended use The model can be used for flagging anime-style images for sexual content. It can also be fine-tuned on other tasks related to anime images.
[ "safe", "suggestive", "explicit" ]
kobe/vit-base-beans
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-beans This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.0866 - Accuracy: 0.9850 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 1337 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.2501 | 1.0 | 130 | 0.2281 | 0.9624 | | 0.2895 | 2.0 | 260 | 0.1138 | 0.9925 | | 0.1549 | 3.0 | 390 | 0.1065 | 0.9774 | | 0.0952 | 4.0 | 520 | 0.0866 | 0.9850 | | 0.1511 | 5.0 | 650 | 0.0875 | 0.9774 | ### Framework versions - Transformers 4.25.0.dev0 - Pytorch 1.12.1 - Datasets 2.7.1.dev0 - Tokenizers 0.13.2
[ "angular_leaf_spot", "bean_rust", "healthy" ]
ShuaHousetable/serverless-roomsort
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # serverless-roomsort This model is a fine-tuned version of [microsoft/beit-base-patch16-224-pt22k-ft22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k-ft22k) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0394 - Accuracy: 0.9892 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.7844 | 1.0 | 762 | 0.0608 | 0.9791 | | 0.0361 | 2.0 | 1524 | 0.0626 | 0.9830 | | 0.0149 | 3.0 | 2286 | 0.0468 | 0.9879 | | 0.0027 | 4.0 | 3048 | 0.0394 | 0.9892 | | 0.0017 | 5.0 | 3810 | 0.0486 | 0.9889 | ### Framework versions - Transformers 4.17.0 - Pytorch 1.10.2+cu113 - Datasets 1.18.4 - Tokenizers 0.13.0
[ "bathroom", "kitchen", "bedroom", "livingroom_diningroom", "garage", "hallway", "laundryroom", "office", "blueprints", "exterior" ]
jungjongho/vit-base-tour-demo-v5
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-tour-demo-v5 This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.1467 - Accuracy: 0.4880 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 3.4573 | 0.13 | 100 | 3.2038 | 0.3334 | | 2.9547 | 0.27 | 200 | 2.8725 | 0.3672 | | 2.6093 | 0.4 | 300 | 2.7619 | 0.3954 | | 2.6212 | 0.54 | 400 | 2.6269 | 0.3942 | | 2.5063 | 0.67 | 500 | 2.5060 | 0.4211 | | 2.3113 | 0.81 | 600 | 2.5348 | 0.4201 | | 2.5702 | 0.94 | 700 | 2.3345 | 0.4502 | | 2.0479 | 1.08 | 800 | 2.3183 | 0.4484 | | 1.754 | 1.21 | 900 | 2.2546 | 0.4661 | | 1.7772 | 1.34 | 1000 | 2.1994 | 0.4794 | | 1.9276 | 1.48 | 1100 | 2.1672 | 0.4731 | | 1.6621 | 1.61 | 1200 | 2.1676 | 0.4845 | | 1.7063 | 1.75 | 1300 | 2.1446 | 0.4806 | | 1.8655 | 1.88 | 1400 | 2.1121 | 0.4933 | | 1.4577 | 2.02 | 1500 | 2.0934 | 0.4955 | | 1.1857 | 2.15 | 1600 | 2.1128 | 0.4906 | | 1.1684 | 2.28 | 1700 | 2.1218 | 0.4941 | | 1.3873 | 2.42 | 1800 | 2.1108 | 0.4957 | | 1.3545 | 2.55 | 1900 | 2.0985 | 0.4992 | | 0.9789 | 2.69 | 2000 | 2.0997 | 0.4961 | | 1.1772 | 2.82 | 2100 | 2.1141 | 0.4951 | | 1.0968 | 2.96 | 2200 | 2.1097 | 0.4922 | | 0.7883 | 3.09 | 2300 | 2.1170 | 0.5067 | | 0.7593 | 3.23 | 2400 | 2.1516 | 0.4847 | | 0.5671 | 3.36 | 2500 | 2.1414 | 0.4925 | | 0.6442 | 3.49 | 2600 | 2.1498 | 0.4880 | | 0.516 | 3.63 | 2700 | 2.1442 | 0.4878 | | 0.6283 | 3.76 | 2800 | 2.1518 | 0.4882 | | 0.5629 | 3.9 | 2900 | 2.1467 | 0.4880 | ### Framework versions - Transformers 4.22.2 - Pytorch 1.12.1+cu113 - Datasets 2.5.1 - Tokenizers 0.12.1
[ "5일장", "atv", "공예,공방", "중식", "채식전문점", "카약/카누", "카지노", "카트", "컨벤션", "컨벤션센터", "콘도미니엄", "클래식음악회", "클럽", "공원", "터널", "테마공원", "트래킹", "특산물판매점", "패밀리레스토랑", "펜션", "폭포", "학교", "한식", "한옥스테이", "관광단지", "항구/포구", "해수욕장", "해안절경", "헬스투어", "헹글라이딩/패러글라이딩", "호수", "홈스테이", "희귀동.식물", "국립공원", "군립공원", "기념관", "기념탑/기념비/전망대", "기암괴석", "기타", "기타행사", "mtb", "농.산.어촌 체험", "다리/대교", "대중콘서트", "대형서점", "도립공원", "도서관", "동굴", "동상", "등대", "래프팅", "강", "면세점", "모텔", "문", "문화관광축제", "문화원", "문화전수시설", "뮤지컬", "미술관/화랑", "민물낚시", "민박", "게스트하우스", "민속마을", "바/까페", "바다낚시", "박람회", "박물관", "발전소", "백화점", "번지점프", "복합 레포츠", "분수", "계곡", "빙벽등반", "사격장", "사찰", "산", "상설시장", "생가", "서비스드레지던스", "서양식", "섬", "성", "고궁", "수련시설", "수목원", "수상레포츠", "수영", "스노쿨링/스킨스쿠버다이빙", "스카이다이빙", "스케이트", "스키(보드) 렌탈샵", "스키/스노보드", "승마", "고택", "식음료", "썰매장", "안보관광", "야영장,오토캠핑장", "약수터", "연극", "영화관", "온천/욕장/스파", "외국문화원", "요트", "골프", "윈드서핑/제트스키", "유람선/잠수함관광", "유명건물", "유스호스텔", "유원지", "유적지/사적지", "이색거리", "이색찜질방", "이색체험", "인라인(실내 인라인 포함)", "공연장", "일반축제", "일식", "자동차경주", "자연생태관광지", "자연휴양림", "자전거하이킹", "전문상가", "전시관", "전통공연", "종교성지" ]
NimaBoscarino/dog_food
# Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 1647758504 - CO2 Emissions (in grams): 6.7999 ## Validation Metrics - Loss: 0.001 - Accuracy: 1.000 - Macro F1: 1.000 - Micro F1: 1.000 - Weighted F1: 1.000 - Macro Precision: 1.000 - Micro Precision: 1.000 - Weighted Precision: 1.000 - Macro Recall: 1.000 - Micro Recall: 1.000 - Weighted Recall: 1.000
[ "chicken", "dog", "muffin" ]
jon-fernandes/vit-base-patch16-224-finetuned-flower
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-patch16-224-finetuned-flower This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the imagefolder dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.22.2 - Pytorch 1.12.1+cu113 - Datasets 2.5.1 - Tokenizers 0.12.1
[ "daisy", "dandelion", "roses", "sunflowers", "tulips" ]
mouss/autotrain-damages-1652858619
# Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 1652858619 - CO2 Emissions (in grams): 0.0073 ## Validation Metrics - Loss: 0.082 - Accuracy: 0.989 - Precision: 1.000 - Recall: 0.978 - AUC: 0.995 - F1: 0.989
[ "00-damage", "01-whole" ]
ImageIN/convnext-base-224_finetuned_on_unlabelled_IA_with_snorkel_labels
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # convnext-base-224_finetuned_on_unlabelled_IA_with_snorkel_labels This model is a fine-tuned version of [facebook/convnext-base-224](https://huggingface.co/facebook/convnext-base-224) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.3443 - Precision: 0.9864 - Recall: 0.9822 - F1: 0.9843 - Accuracy: 0.9884 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 - mixed_precision_training: Native AMP - label_smoothing_factor: 0.2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.3611 | 1.0 | 2021 | 0.3467 | 0.9843 | 0.9729 | 0.9784 | 0.9842 | | 0.3524 | 2.0 | 4042 | 0.3453 | 0.9853 | 0.9790 | 0.9821 | 0.9868 | | 0.3466 | 3.0 | 6063 | 0.3438 | 0.9854 | 0.9847 | 0.9851 | 0.9889 | | 0.3433 | 4.0 | 8084 | 0.3434 | 0.9850 | 0.9808 | 0.9829 | 0.9873 | | 0.3404 | 5.0 | 10105 | 0.3459 | 0.9853 | 0.9790 | 0.9821 | 0.9868 | | 0.3384 | 6.0 | 12126 | 0.3453 | 0.9853 | 0.9790 | 0.9821 | 0.9868 | | 0.3382 | 7.0 | 14147 | 0.3437 | 0.9864 | 0.9822 | 0.9843 | 0.9884 | | 0.3358 | 8.0 | 16168 | 0.3441 | 0.9857 | 0.9829 | 0.9843 | 0.9884 | | 0.3349 | 9.0 | 18189 | 0.3448 | 0.9857 | 0.9829 | 0.9843 | 0.9884 | | 0.3325 | 10.0 | 20210 | 0.3443 | 0.9864 | 0.9822 | 0.9843 | 0.9884 | ### Framework versions - Transformers 4.22.2 - Pytorch 1.12.1+cu113 - Datasets 2.5.1 - Tokenizers 0.12.1
[ "illustrated", "not-illustrated" ]
umm-maybe/AI-image-detector
*__NOTE__: Unless you are trying to detect imagery generated using older models such as VQGAN+CLIP, please use the [updated version](https://huggingface.co/Organika/sdxl-detector) of this detector instead.* This model is a proof-of-concept demonstration of using a ViT model to predict whether an artistic image was generated using AI. It was created in October 2022, and as such, the training data did not include any samples generated by Midjourney 5, SDXL, or DALLE-3. It may still be able to correctly identify samples from these more recent models due to being trained on outputs of their predecessors. Furthermore, the intended scope of this tool is artistic images; that is to say, it is not a deepfake photo detector, and general computer imagery (webcams, screenshots, etc.) may throw it off. In general, this tool can only serve as one of many potential indicators that an image was AI-generated. Images scoring as very probably artificial (e.g. 90% or higher) could be referred to a human expert for further investigation, if needed. For more information, please see the blog post describing this project at: https://medium.com/@matthewmaybe/can-an-ai-learn-to-identify-ai-art-545d9d6af226 # Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 1519658722 - CO2 Emissions (in grams): 7.9405 ## Validation Metrics - Loss: 0.163 - Accuracy: 0.942 - Precision: 0.938 - Recall: 0.978 - AUC: 0.980 - F1: 0.958 # License Notice This work is licensed under a [Creative Commons Attribution-NoDerivatives 4.0 International License](https://creativecommons.org/licenses/by-nd/4.0/). You may distribute and make this model available to others as part of your own web page, app, or service so long as you provide attribution. However, use of this model within text-to-image systems to evade AI image detection would be considered a "derivative work" and as such prohibited by the license terms.
[ "artificial", "human" ]
juliensimon/autotrain-chest-xray-demo-1677859324
Original dataset: https://www.kaggle.com/datasets/paultimothymooney/chest-xray-pneumonia # Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 1677859324 - CO2 Emissions (in grams): 13.2197 ## Validation Metrics - Loss: 0.209 - Accuracy: 0.934 - Precision: 0.933 - Recall: 0.964 - AUC: 0.976 - F1: 0.948
[ "normal", "pneumonia" ]
tursunali/autotrain-isuzu-f-left-1681159381
# Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 1681159381 - CO2 Emissions (in grams): 0.8519 ## Validation Metrics - Loss: 0.021 - Accuracy: 0.990 - Precision: 1.000 - Recall: 0.974 - AUC: 1.000 - F1: 0.987
[ "crossed", "not_crossed" ]
jungjongho/vit-base-tour-augmentation-v5
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-tour-augmentation-v5 This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.4590 - Accuracy: 0.4290 - F1: 0.4272 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 4e-05 - train_batch_size: 32 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:| | 1.634 | 0.77 | 1000 | 2.4590 | 0.4290 | 0.4272 | | 0.8261 | 1.53 | 2000 | 2.4812 | 0.4704 | 0.4488 | | 0.3823 | 2.3 | 3000 | 2.6315 | 0.4668 | 0.4567 | | 0.1652 | 3.07 | 4000 | 2.8592 | 0.4670 | 0.4526 | | 0.0713 | 3.83 | 5000 | 3.0906 | 0.4431 | 0.4534 | | 0.0354 | 4.6 | 6000 | 3.2511 | 0.4551 | 0.4496 | | 0.0214 | 5.36 | 7000 | 3.3369 | 0.4737 | 0.4604 | | 0.0129 | 6.13 | 8000 | 3.4611 | 0.4619 | 0.4625 | | 0.0079 | 6.9 | 9000 | 3.5376 | 0.4625 | 0.4584 | | 0.0058 | 7.66 | 10000 | 3.5842 | 0.4706 | 0.4614 | ### Framework versions - Transformers 4.22.2 - Pytorch 1.12.1+cu113 - Datasets 2.5.2 - Tokenizers 0.12.1
[ "5일장", "atv", "공예,공방", "중식", "채식전문점", "카약/카누", "카지노", "카트", "컨벤션", "컨벤션센터", "콘도미니엄", "클래식음악회", "클럽", "공원", "터널", "테마공원", "트래킹", "특산물판매점", "패밀리레스토랑", "펜션", "폭포", "학교", "한식", "한옥스테이", "관광단지", "항구/포구", "해수욕장", "해안절경", "헬스투어", "헹글라이딩/패러글라이딩", "호수", "홈스테이", "희귀동.식물", "국립공원", "군립공원", "기념관", "기념탑/기념비/전망대", "기암괴석", "기타", "기타행사", "mtb", "농.산.어촌 체험", "다리/대교", "대중콘서트", "대형서점", "도립공원", "도서관", "동굴", "동상", "등대", "래프팅", "강", "면세점", "모텔", "문", "문화관광축제", "문화원", "문화전수시설", "뮤지컬", "미술관/화랑", "민물낚시", "민박", "게스트하우스", "민속마을", "바/까페", "바다낚시", "박람회", "박물관", "발전소", "백화점", "번지점프", "복합 레포츠", "분수", "계곡", "빙벽등반", "사격장", "사찰", "산", "상설시장", "생가", "서비스드레지던스", "서양식", "섬", "성", "고궁", "수련시설", "수목원", "수상레포츠", "수영", "스노쿨링/스킨스쿠버다이빙", "스카이다이빙", "스케이트", "스키(보드) 렌탈샵", "스키/스노보드", "승마", "고택", "식음료", "썰매장", "안보관광", "야영장,오토캠핑장", "약수터", "연극", "영화관", "온천/욕장/스파", "외국문화원", "요트", "골프", "윈드서핑/제트스키", "유람선/잠수함관광", "유명건물", "유스호스텔", "유원지", "유적지/사적지", "이색거리", "이색찜질방", "이색체험", "인라인(실내 인라인 포함)", "공연장", "일반축제", "일식", "자동차경주", "자연생태관광지", "자연휴양림", "자전거하이킹", "전문상가", "전시관", "전통공연", "종교성지" ]
renee127/autotrain-vision_6_categories_70_images_each-1691759542
# Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 1691759542 - CO2 Emissions (in grams): 1.8709 ## Validation Metrics - Loss: 0.056 - Accuracy: 0.988 - Macro F1: 0.988 - Micro F1: 0.988 - Weighted F1: 0.988 - Macro Precision: 0.989 - Micro Precision: 0.988 - Weighted Precision: 0.989 - Macro Recall: 0.988 - Micro Recall: 0.988 - Weighted Recall: 0.988
[ "airport_inside", "artstudio", "auditorium", "casino", "inside_bus", "inside_subway" ]
jmcfadden/swin-tiny-patch4-window7-224-finetuned-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-eurosat This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.0613 - Accuracy: 0.9807 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.2578 | 1.0 | 190 | 0.1447 | 0.9530 | | 0.1733 | 2.0 | 380 | 0.0787 | 0.9733 | | 0.1139 | 3.0 | 570 | 0.0613 | 0.9807 | ### Framework versions - Transformers 4.22.2 - Pytorch 1.12.1+cu113 - Datasets 2.5.2 - Tokenizers 0.12.1
[ "annualcrop", "forest", "herbaceousvegetation", "highway", "industrial", "pasture", "permanentcrop", "residential", "river", "sealake" ]
fumi13/vit-base-beans
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-beans This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.0824 - Accuracy: 0.9925 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 1337 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.3039 | 1.0 | 130 | 0.2474 | 0.9624 | | 0.1299 | 2.0 | 260 | 0.1007 | 0.9925 | | 0.0885 | 3.0 | 390 | 0.0824 | 0.9925 | | 0.0976 | 4.0 | 520 | 0.1179 | 0.9699 | | 0.1284 | 5.0 | 650 | 0.0832 | 0.9774 | ### Framework versions - Transformers 4.23.0.dev0 - Pytorch 1.12.1+cu113 - Datasets 2.5.2 - Tokenizers 0.13.1
[ "angular_leaf_spot", "bean_rust", "healthy" ]
ImageIN/levit-192_finetuned_on_unlabelled_IA_with_snorkel_labels
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # levit-192_finetuned_on_unlabelled_IA_with_snorkel_labels This model is a fine-tuned version of [facebook/levit-192](https://huggingface.co/facebook/levit-192) on the None dataset. It achieves the following results on the evaluation set: - Loss: nan - Precision: 0.9836 - Recall: 0.9822 - F1: 0.9829 - Accuracy: 0.9873 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 128 - eval_batch_size: 256 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 253 | nan | 0.9743 | 0.9791 | 0.9766 | 0.9826 | | 0.0557 | 2.0 | 506 | nan | 0.9829 | 0.9801 | 0.9815 | 0.9863 | | 0.0557 | 3.0 | 759 | nan | 0.9836 | 0.9822 | 0.9829 | 0.9873 | | 0.0543 | 4.0 | 1012 | nan | 0.9839 | 0.9775 | 0.9807 | 0.9858 | | 0.0543 | 5.0 | 1265 | nan | 0.9616 | 0.9727 | 0.9670 | 0.9752 | | 0.0457 | 6.0 | 1518 | nan | 0.9563 | 0.9699 | 0.9629 | 0.9720 | | 0.0457 | 7.0 | 1771 | nan | 0.9822 | 0.9808 | 0.9815 | 0.9863 | | 0.0418 | 8.0 | 2024 | nan | 0.9735 | 0.9769 | 0.9752 | 0.9815 | | 0.0418 | 9.0 | 2277 | nan | 0.9832 | 0.9811 | 0.9822 | 0.9868 | | 0.0396 | 10.0 | 2530 | nan | 0.9843 | 0.9815 | 0.9829 | 0.9873 | ### Framework versions - Transformers 4.22.2 - Pytorch 1.12.1+cu113 - Datasets 2.5.2 - Tokenizers 0.12.1
[ "illustrated", "not-illustrated" ]
monistar/swin-tiny-patch4-window7-224-finetuned-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# swin-tiny-patch4-window7-224-finetuned-eurosat

This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0525
- Accuracy: 0.9815

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 128
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.2396 | 1.0 | 190 | 0.1071 | 0.9656 |
| 0.1605 | 2.0 | 380 | 0.0665 | 0.9793 |
| 0.1282 | 3.0 | 570 | 0.0525 | 0.9815 |

### Framework versions

- Transformers 4.24.0
- Pytorch 1.12.1+cu113
- Datasets 2.7.1
- Tokenizers 0.13.2
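The "total_train_batch_size: 128" above is derived rather than set directly: with gradient accumulation the optimizer steps once per 4 forward passes. A quick sanity check (single-device training is an assumption; the card does not state the hardware):

```python
per_device_train_batch_size = 32
gradient_accumulation_steps = 4
num_devices = 1  # assumption

effective_batch = per_device_train_batch_size * gradient_accumulation_steps * num_devices
assert effective_batch == 128  # matches "total_train_batch_size: 128"
```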
[ "annualcrop", "forest", "herbaceousvegetation", "highway", "industrial", "pasture", "permanentcrop", "residential", "river", "sealake" ]
stevhliu/my_awesome_food_model
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# my_awesome_food_model

This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the food101 dataset.
It achieves the following results on the evaluation set:
- Loss: 1.1671
- Accuracy: 0.916

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 1.7213 | 0.99 | 62 | 1.6647 | 0.885 |
| 1.2902 | 1.99 | 124 | 1.2744 | 0.918 |
| 1.1288 | 2.99 | 186 | 1.1671 | 0.916 |

### Framework versions

- Transformers 4.23.1
- Pytorch 1.12.1+cu113
- Datasets 2.5.2
- Tokenizers 0.13.1
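A small sketch showing how the checkpoint's config carries the mapping from predicted class index to the 101 food101 label names (the index-0 lookup is just an illustration):

```python
from transformers import AutoModelForImageClassification

model = AutoModelForImageClassification.from_pretrained("stevhliu/my_awesome_food_model")
print(len(model.config.id2label))  # 101 food101 classes
print(model.config.id2label[0])    # first label in the list below, e.g. "apple_pie"
```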
[ "apple_pie", "baby_back_ribs", "bruschetta", "waffles", "caesar_salad", "cannoli", "caprese_salad", "carrot_cake", "ceviche", "cheesecake", "cheese_plate", "chicken_curry", "chicken_quesadilla", "baklava", "chicken_wings", "chocolate_cake", "chocolate_mousse", "churros", "clam_chowder", "club_sandwich", "crab_cakes", "creme_brulee", "croque_madame", "cup_cakes", "beef_carpaccio", "deviled_eggs", "donuts", "dumplings", "edamame", "eggs_benedict", "escargots", "falafel", "filet_mignon", "fish_and_chips", "foie_gras", "beef_tartare", "french_fries", "french_onion_soup", "french_toast", "fried_calamari", "fried_rice", "frozen_yogurt", "garlic_bread", "gnocchi", "greek_salad", "grilled_cheese_sandwich", "beet_salad", "grilled_salmon", "guacamole", "gyoza", "hamburger", "hot_and_sour_soup", "hot_dog", "huevos_rancheros", "hummus", "ice_cream", "lasagna", "beignets", "lobster_bisque", "lobster_roll_sandwich", "macaroni_and_cheese", "macarons", "miso_soup", "mussels", "nachos", "omelette", "onion_rings", "oysters", "bibimbap", "pad_thai", "paella", "pancakes", "panna_cotta", "peking_duck", "pho", "pizza", "pork_chop", "poutine", "prime_rib", "bread_pudding", "pulled_pork_sandwich", "ramen", "ravioli", "red_velvet_cake", "risotto", "samosa", "sashimi", "scallops", "seaweed_salad", "shrimp_and_grits", "breakfast_burrito", "spaghetti_bolognese", "spaghetti_carbonara", "spring_rolls", "steak", "strawberry_shortcake", "sushi", "tacos", "takoyaki", "tiramisu", "tuna_tartare" ]
siddharth963/vit-base-patch16-224-in21k-finetuned-cassava3
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# vit-base-patch16-224-in21k-finetuned-cassava3

This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the image_folder dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3419
- Accuracy: 0.8855

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 128
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 10

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.5624 | 0.99 | 133 | 0.5866 | 0.8166 |
| 0.4717 | 1.99 | 266 | 0.4245 | 0.8692 |
| 0.4105 | 2.99 | 399 | 0.3708 | 0.8811 |
| 0.3753 | 3.99 | 532 | 0.3646 | 0.8787 |
| 0.2997 | 4.99 | 665 | 0.3655 | 0.8780 |
| 0.3176 | 5.99 | 798 | 0.3545 | 0.8822 |
| 0.2849 | 6.99 | 931 | 0.3441 | 0.8850 |
| 0.2931 | 7.99 | 1064 | 0.3419 | 0.8855 |
| 0.27 | 8.99 | 1197 | 0.3419 | 0.8848 |
| 0.2927 | 9.99 | 1330 | 0.3403 | 0.8853 |

### Framework versions

- Transformers 4.20.1
- Pytorch 1.11.0
- Datasets 2.1.0
- Tokenizers 0.12.1
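With `lr_scheduler_warmup_ratio: 0.1` and the 1330 total optimizer steps implied by the table (133 per epoch × 10 epochs), the learning rate ramps up for roughly the first 133 steps and then decays linearly. A sketch of that schedule using a dummy parameter (the exact warmup-step rounding is an implementation detail):

```python
import torch
from transformers import get_linear_schedule_with_warmup

total_steps = 133 * 10                 # steps/epoch × epochs, from the table above
warmup_steps = int(0.1 * total_steps)  # warmup_ratio 0.1 -> 133 steps

param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.Adam([param], lr=5e-5, betas=(0.9, 0.999), eps=1e-8)
scheduler = get_linear_schedule_with_warmup(optimizer, warmup_steps, total_steps)
```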
[ "cbb", "cbsd", "cgm", "cmd", "h" ]
sanjeev498/vit-base-beans
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# vit-base-beans

This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0189
- Accuracy: 1.0

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0002
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 4
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.0568 | 1.54 | 100 | 0.0299 | 1.0 |
| 0.0135 | 3.08 | 200 | 0.0189 | 1.0 |

### Framework versions

- Transformers 4.23.1
- Pytorch 1.12.1+cu113
- Datasets 2.5.2
- Tokenizers 0.13.1
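For comparison with the pipeline route shown earlier, a manual inference sketch against this checkpoint; "bean_leaf.jpg" is a hypothetical local image:

```python
import torch
from PIL import Image
from transformers import AutoFeatureExtractor, AutoModelForImageClassification

extractor = AutoFeatureExtractor.from_pretrained("sanjeev498/vit-base-beans")
model = AutoModelForImageClassification.from_pretrained("sanjeev498/vit-base-beans")

image = Image.open("bean_leaf.jpg")  # hypothetical file
inputs = extractor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
# Maps the argmax index to angular_leaf_spot / bean_rust / healthy.
print(model.config.id2label[logits.argmax(-1).item()])
```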
[ "angular_leaf_spot", "bean_rust", "healthy" ]
ImageIN/convnext-tiny-224_finetuned_on_unlabelled_IA_with_snorkel_labels
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# convnext-tiny-224_finetuned_on_unlabelled_IA_with_snorkel_labels

This model is a fine-tuned version of [](https://huggingface.co/) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4381
- Precision: 0.8239
- Recall: 0.7919
- F1: 0.8058
- Accuracy: 0.8629

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.001
- train_batch_size: 256
- eval_batch_size: 256
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 30
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| No log | 1.0 | 112 | 0.5589 | 0.7547 | 0.5380 | 0.5097 | 0.7679 |
| No log | 2.0 | 224 | 0.5578 | 0.7691 | 0.5387 | 0.5103 | 0.7690 |
| No log | 3.0 | 336 | 0.4812 | 0.8513 | 0.7371 | 0.7709 | 0.8555 |
| No log | 4.0 | 448 | 0.4387 | 0.8734 | 0.6539 | 0.6835 | 0.8259 |
| 0.482 | 5.0 | 560 | 0.4427 | 0.8322 | 0.6250 | 0.6449 | 0.8085 |
| 0.482 | 6.0 | 672 | 0.6234 | 0.8219 | 0.5702 | 0.5635 | 0.7848 |
| 0.482 | 7.0 | 784 | 0.6187 | 0.8791 | 0.6070 | 0.6196 | 0.8054 |
| 0.482 | 8.0 | 896 | 0.3953 | 0.8683 | 0.7134 | 0.7507 | 0.8502 |
| 0.3656 | 9.0 | 1008 | 0.4381 | 0.8239 | 0.7919 | 0.8058 | 0.8629 |
| 0.3656 | 10.0 | 1120 | 0.5346 | 0.7794 | 0.7900 | 0.7844 | 0.8370 |
| 0.3656 | 11.0 | 1232 | 0.3685 | 0.8678 | 0.7600 | 0.7943 | 0.8681 |
| 0.3656 | 12.0 | 1344 | 0.6900 | 0.6244 | 0.6667 | 0.6099 | 0.6435 |
| 0.3656 | 13.0 | 1456 | 0.6097 | 0.6832 | 0.7149 | 0.6931 | 0.7511 |
| 0.2987 | 14.0 | 1568 | 0.5435 | 0.8746 | 0.6754 | 0.7096 | 0.8354 |
| 0.2987 | 15.0 | 1680 | 0.5525 | 0.7277 | 0.7690 | 0.7411 | 0.7890 |
| 0.2987 | 16.0 | 1792 | 0.5003 | 0.8086 | 0.7694 | 0.7856 | 0.8507 |
| 0.2987 | 17.0 | 1904 | 0.8172 | 0.6183 | 0.6576 | 0.6074 | 0.6450 |
| 0.2598 | 18.0 | 2016 | 0.6102 | 0.6977 | 0.7489 | 0.7070 | 0.75 |
| 0.2598 | 19.0 | 2128 | 0.4260 | 0.8523 | 0.7497 | 0.7822 | 0.8602 |
| 0.2598 | 20.0 | 2240 | 0.5503 | 0.8276 | 0.6770 | 0.7079 | 0.8281 |
| 0.2598 | 21.0 | 2352 | 0.4574 | 0.7994 | 0.7785 | 0.7879 | 0.8481 |
| 0.2598 | 22.0 | 2464 | 0.6307 | 0.8620 | 0.6353 | 0.6592 | 0.8165 |
| 0.2111 | 23.0 | 2576 | 0.4605 | 0.8196 | 0.7697 | 0.7894 | 0.8555 |
| 0.2111 | 24.0 | 2688 | 0.5290 | 0.8152 | 0.7320 | 0.7592 | 0.8434 |
| 0.2111 | 25.0 | 2800 | 0.4754 | 0.8755 | 0.7216 | 0.7599 | 0.8550 |
| 0.2111 | 26.0 | 2912 | 0.5161 | 0.8428 | 0.7436 | 0.7750 | 0.8555 |
| 0.1638 | 27.0 | 3024 | 0.5753 | 0.7358 | 0.7278 | 0.7316 | 0.8043 |
| 0.1638 | 28.0 | 3136 | 0.6403 | 0.8468 | 0.7016 | 0.7360 | 0.8412 |
| 0.1638 | 29.0 | 3248 | 0.5418 | 0.7912 | 0.7473 | 0.7647 | 0.8381 |
| 0.1638 | 30.0 | 3360 | 0.5651 | 0.8240 | 0.7315 | 0.7607 | 0.8460 |

### Framework versions

- Transformers 4.23.1
- Pytorch 1.12.1+cu113
- Datasets 2.6.0
- Tokenizers 0.13.1
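This card reports precision, recall, F1, and accuracy at every epoch; below is a hedged sketch of a `compute_metrics` function that would produce those four numbers with the `Trainer` API (macro averaging is an assumption, the card does not state which average was used):

```python
import numpy as np
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    precision, recall, f1, _ = precision_recall_fscore_support(
        labels, preds, average="macro"  # assumption
    )
    return {"precision": precision, "recall": recall, "f1": f1,
            "accuracy": accuracy_score(labels, preds)}
```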
[ "illustrated", "not-illustrated" ]
holylovenia/problem2-test
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# problem2-test

This model is a fine-tuned version of [](https://huggingface.co/) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 4.2281
- Accuracy: 0.5574

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| No log | 0.9 | 7 | 6.2748 | 0.0 |
| 7.4671 | 1.9 | 14 | 5.1777 | 0.5738 |
| 5.3033 | 2.9 | 21 | 4.4616 | 0.5902 |
| 5.3033 | 3.9 | 28 | 5.1811 | 0.5246 |
| 4.1105 | 4.9 | 35 | 4.2281 | 0.5574 |

### Framework versions

- Transformers 4.23.1
- Pytorch 1.12.1
- Datasets 2.6.0
- Tokenizers 0.13.1
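The step budget implied by the table is tiny, which makes the warmup ratio easier to read in absolute terms; a quick check (the ceil-vs-floor rounding of warmup steps is an implementation detail):

```python
steps_per_epoch = 7                    # from the table (step 7 ≈ epoch 0.9)
total_steps = steps_per_epoch * 5      # 35, matching the final table row
warmup_steps = int(0.1 * total_steps)  # warmup_ratio 0.1 -> ~3 steps
effective_batch = 8 * 4                # train batch × grad accumulation = 32
print(total_steps, warmup_steps, effective_batch)
```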
[ "tench, tinca tinca", "goldfish, carassius auratus", "great white shark, white shark, man-eater, man-eating shark, carcharodon carcharias", "tiger shark, galeocerdo cuvieri", "hammerhead, hammerhead shark", "electric ray, crampfish, numbfish, torpedo", "stingray", "cock", "hen", "ostrich, struthio camelus", "brambling, fringilla montifringilla", "goldfinch, carduelis carduelis", "house finch, linnet, carpodacus mexicanus", "junco, snowbird", "indigo bunting, indigo finch, indigo bird, passerina cyanea", "robin, american robin, turdus migratorius", "bulbul", "jay", "magpie", "chickadee", "water ouzel, dipper", "kite", "bald eagle, american eagle, haliaeetus leucocephalus", "vulture", "great grey owl, great gray owl, strix nebulosa", "european fire salamander, salamandra salamandra", "common newt, triturus vulgaris", "eft", "spotted salamander, ambystoma maculatum", "axolotl, mud puppy, ambystoma mexicanum", "bullfrog, rana catesbeiana", "tree frog, tree-frog", "tailed frog, bell toad, ribbed toad, tailed toad, ascaphus trui", "loggerhead, loggerhead turtle, caretta caretta", "leatherback turtle, leatherback, leathery turtle, dermochelys coriacea", "mud turtle", "terrapin", "box turtle, box tortoise", "banded gecko", "common iguana, iguana, iguana iguana", "american chameleon, anole, anolis carolinensis", "whiptail, whiptail lizard", "agama", "frilled lizard, chlamydosaurus kingi", "alligator lizard", "gila monster, heloderma suspectum", "green lizard, lacerta viridis", "african chameleon, chamaeleo chamaeleon", "komodo dragon, komodo lizard, dragon lizard, giant lizard, varanus komodoensis", "african crocodile, nile crocodile, crocodylus niloticus", "american alligator, alligator mississipiensis", "triceratops", "thunder snake, worm snake, carphophis amoenus", "ringneck snake, ring-necked snake, ring snake", "hognose snake, puff adder, sand viper", "green snake, grass snake", "king snake, kingsnake", "garter snake, grass snake", "water snake", "vine snake", "night snake, hypsiglena torquata", "boa constrictor, constrictor constrictor", "rock python, rock snake, python sebae", "indian cobra, naja naja", "green mamba", "sea snake", "horned viper, cerastes, sand viper, horned asp, cerastes cornutus", "diamondback, diamondback rattlesnake, crotalus adamanteus", "sidewinder, horned rattlesnake, crotalus cerastes", "trilobite", "harvestman, daddy longlegs, phalangium opilio", "scorpion", "black and gold garden spider, argiope aurantia", "barn spider, araneus cavaticus", "garden spider, aranea diademata", "black widow, latrodectus mactans", "tarantula", "wolf spider, hunting spider", "tick", "centipede", "black grouse", "ptarmigan", "ruffed grouse, partridge, bonasa umbellus", "prairie chicken, prairie grouse, prairie fowl", "peacock", "quail", "partridge", "african grey, african gray, psittacus erithacus", "macaw", "sulphur-crested cockatoo, kakatoe galerita, cacatua galerita", "lorikeet", "coucal", "bee eater", "hornbill", "hummingbird", "jacamar", "toucan", "drake", "red-breasted merganser, mergus serrator", "goose", "black swan, cygnus atratus", "tusker", "echidna, spiny anteater, anteater", "platypus, duckbill, duckbilled platypus, duck-billed platypus, ornithorhynchus anatinus", "wallaby, brush kangaroo", "koala, koala bear, kangaroo bear, native bear, phascolarctos cinereus", "wombat", "jellyfish", "sea anemone, anemone", "brain coral", "flatworm, platyhelminth", "nematode, nematode worm, roundworm", "conch", "snail", "slug", "sea slug, nudibranch", "chiton, coat-of-mail shell, sea 
cradle, polyplacophore", "chambered nautilus, pearly nautilus, nautilus", "dungeness crab, cancer magister", "rock crab, cancer irroratus", "fiddler crab", "king crab, alaska crab, alaskan king crab, alaska king crab, paralithodes camtschatica", "american lobster, northern lobster, maine lobster, homarus americanus", "spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish", "crayfish, crawfish, crawdad, crawdaddy", "hermit crab", "isopod", "white stork, ciconia ciconia", "black stork, ciconia nigra", "spoonbill", "flamingo", "little blue heron, egretta caerulea", "american egret, great white heron, egretta albus", "bittern", "crane", "limpkin, aramus pictus", "european gallinule, porphyrio porphyrio", "american coot, marsh hen, mud hen, water hen, fulica americana", "bustard", "ruddy turnstone, arenaria interpres", "red-backed sandpiper, dunlin, erolia alpina", "redshank, tringa totanus", "dowitcher", "oystercatcher, oyster catcher", "pelican", "king penguin, aptenodytes patagonica", "albatross, mollymawk", "grey whale, gray whale, devilfish, eschrichtius gibbosus, eschrichtius robustus", "killer whale, killer, orca, grampus, sea wolf, orcinus orca", "dugong, dugong dugon", "sea lion", "chihuahua", "japanese spaniel", "maltese dog, maltese terrier, maltese", "pekinese, pekingese, peke", "shih-tzu", "blenheim spaniel", "papillon", "toy terrier", "rhodesian ridgeback", "afghan hound, afghan", "basset, basset hound", "beagle", "bloodhound, sleuthhound", "bluetick", "black-and-tan coonhound", "walker hound, walker foxhound", "english foxhound", "redbone", "borzoi, russian wolfhound", "irish wolfhound", "italian greyhound", "whippet", "ibizan hound, ibizan podenco", "norwegian elkhound, elkhound", "otterhound, otter hound", "saluki, gazelle hound", "scottish deerhound, deerhound", "weimaraner", "staffordshire bullterrier, staffordshire bull terrier", "american staffordshire terrier, staffordshire terrier, american pit bull terrier, pit bull terrier", "bedlington terrier", "border terrier", "kerry blue terrier", "irish terrier", "norfolk terrier", "norwich terrier", "yorkshire terrier", "wire-haired fox terrier", "lakeland terrier", "sealyham terrier, sealyham", "airedale, airedale terrier", "cairn, cairn terrier", "australian terrier", "dandie dinmont, dandie dinmont terrier", "boston bull, boston terrier", "miniature schnauzer", "giant schnauzer", "standard schnauzer", "scotch terrier, scottish terrier, scottie", "tibetan terrier, chrysanthemum dog", "silky terrier, sydney silky", "soft-coated wheaten terrier", "west highland white terrier", "lhasa, lhasa apso", "flat-coated retriever", "curly-coated retriever", "golden retriever", "labrador retriever", "chesapeake bay retriever", "german short-haired pointer", "vizsla, hungarian pointer", "english setter", "irish setter, red setter", "gordon setter", "brittany spaniel", "clumber, clumber spaniel", "english springer, english springer spaniel", "welsh springer spaniel", "cocker spaniel, english cocker spaniel, cocker", "sussex spaniel", "irish water spaniel", "kuvasz", "schipperke", "groenendael", "malinois", "briard", "kelpie", "komondor", "old english sheepdog, bobtail", "shetland sheepdog, shetland sheep dog, shetland", "collie", "border collie", "bouvier des flandres, bouviers des flandres", "rottweiler", "german shepherd, german shepherd dog, german police dog, alsatian", "doberman, doberman pinscher", "miniature pinscher", "greater swiss mountain dog", "bernese mountain dog", "appenzeller", "entlebucher", "boxer", "bull 
mastiff", "tibetan mastiff", "french bulldog", "great dane", "saint bernard, st bernard", "eskimo dog, husky", "malamute, malemute, alaskan malamute", "siberian husky", "dalmatian, coach dog, carriage dog", "affenpinscher, monkey pinscher, monkey dog", "basenji", "pug, pug-dog", "leonberg", "newfoundland, newfoundland dog", "great pyrenees", "samoyed, samoyede", "pomeranian", "chow, chow chow", "keeshond", "brabancon griffon", "pembroke, pembroke welsh corgi", "cardigan, cardigan welsh corgi", "toy poodle", "miniature poodle", "standard poodle", "mexican hairless", "timber wolf, grey wolf, gray wolf, canis lupus", "white wolf, arctic wolf, canis lupus tundrarum", "red wolf, maned wolf, canis rufus, canis niger", "coyote, prairie wolf, brush wolf, canis latrans", "dingo, warrigal, warragal, canis dingo", "dhole, cuon alpinus", "african hunting dog, hyena dog, cape hunting dog, lycaon pictus", "hyena, hyaena", "red fox, vulpes vulpes", "kit fox, vulpes macrotis", "arctic fox, white fox, alopex lagopus", "grey fox, gray fox, urocyon cinereoargenteus", "tabby, tabby cat", "tiger cat", "persian cat", "siamese cat, siamese", "egyptian cat", "cougar, puma, catamount, mountain lion, painter, panther, felis concolor", "lynx, catamount", "leopard, panthera pardus", "snow leopard, ounce, panthera uncia", "jaguar, panther, panthera onca, felis onca", "lion, king of beasts, panthera leo", "tiger, panthera tigris", "cheetah, chetah, acinonyx jubatus", "brown bear, bruin, ursus arctos", "american black bear, black bear, ursus americanus, euarctos americanus", "ice bear, polar bear, ursus maritimus, thalarctos maritimus", "sloth bear, melursus ursinus, ursus ursinus", "mongoose", "meerkat, mierkat", "tiger beetle", "ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle", "ground beetle, carabid beetle", "long-horned beetle, longicorn, longicorn beetle", "leaf beetle, chrysomelid", "dung beetle", "rhinoceros beetle", "weevil", "fly", "bee", "ant, emmet, pismire", "grasshopper, hopper", "cricket", "walking stick, walkingstick, stick insect", "cockroach, roach", "mantis, mantid", "cicada, cicala", "leafhopper", "lacewing, lacewing fly", "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk", "damselfly", "admiral", "ringlet, ringlet butterfly", "monarch, monarch butterfly, milkweed butterfly, danaus plexippus", "cabbage butterfly", "sulphur butterfly, sulfur butterfly", "lycaenid, lycaenid butterfly", "starfish, sea star", "sea urchin", "sea cucumber, holothurian", "wood rabbit, cottontail, cottontail rabbit", "hare", "angora, angora rabbit", "hamster", "porcupine, hedgehog", "fox squirrel, eastern fox squirrel, sciurus niger", "marmot", "beaver", "guinea pig, cavia cobaya", "sorrel", "zebra", "hog, pig, grunter, squealer, sus scrofa", "wild boar, boar, sus scrofa", "warthog", "hippopotamus, hippo, river horse, hippopotamus amphibius", "ox", "water buffalo, water ox, asiatic buffalo, bubalus bubalis", "bison", "ram, tup", "bighorn, bighorn sheep, cimarron, rocky mountain bighorn, rocky mountain sheep, ovis canadensis", "ibex, capra ibex", "hartebeest", "impala, aepyceros melampus", "gazelle", "arabian camel, dromedary, camelus dromedarius", "llama", "weasel", "mink", "polecat, fitch, foulmart, foumart, mustela putorius", "black-footed ferret, ferret, mustela nigripes", "otter", "skunk, polecat, wood pussy", "badger", "armadillo", "three-toed sloth, ai, bradypus tridactylus", "orangutan, orang, orangutang, pongo pygmaeus", "gorilla, 
gorilla gorilla", "chimpanzee, chimp, pan troglodytes", "gibbon, hylobates lar", "siamang, hylobates syndactylus, symphalangus syndactylus", "guenon, guenon monkey", "patas, hussar monkey, erythrocebus patas", "baboon", "macaque", "langur", "colobus, colobus monkey", "proboscis monkey, nasalis larvatus", "marmoset", "capuchin, ringtail, cebus capucinus", "howler monkey, howler", "titi, titi monkey", "spider monkey, ateles geoffroyi", "squirrel monkey, saimiri sciureus", "madagascar cat, ring-tailed lemur, lemur catta", "indri, indris, indri indri, indri brevicaudatus", "indian elephant, elephas maximus", "african elephant, loxodonta africana", "lesser panda, red panda, panda, bear cat, cat bear, ailurus fulgens", "giant panda, panda, panda bear, coon bear, ailuropoda melanoleuca", "barracouta, snoek", "eel", "coho, cohoe, coho salmon, blue jack, silver salmon, oncorhynchus kisutch", "rock beauty, holocanthus tricolor", "anemone fish", "sturgeon", "gar, garfish, garpike, billfish, lepisosteus osseus", "lionfish", "puffer, pufferfish, blowfish, globefish", "abacus", "abaya", "academic gown, academic robe, judge's robe", "accordion, piano accordion, squeeze box", "acoustic guitar", "aircraft carrier, carrier, flattop, attack aircraft carrier", "airliner", "airship, dirigible", "altar", "ambulance", "amphibian, amphibious vehicle", "analog clock", "apiary, bee house", "apron", "ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin", "assault rifle, assault gun", "backpack, back pack, knapsack, packsack, rucksack, haversack", "bakery, bakeshop, bakehouse", "balance beam, beam", "balloon", "ballpoint, ballpoint pen, ballpen, biro", "band aid", "banjo", "bannister, banister, balustrade, balusters, handrail", "barbell", "barber chair", "barbershop", "barn", "barometer", "barrel, cask", "barrow, garden cart, lawn cart, wheelbarrow", "baseball", "basketball", "bassinet", "bassoon", "bathing cap, swimming cap", "bath towel", "bathtub, bathing tub, bath, tub", "beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon", "beacon, lighthouse, beacon light, pharos", "beaker", "bearskin, busby, shako", "beer bottle", "beer glass", "bell cote, bell cot", "bib", "bicycle-built-for-two, tandem bicycle, tandem", "bikini, two-piece", "binder, ring-binder", "binoculars, field glasses, opera glasses", "birdhouse", "boathouse", "bobsled, bobsleigh, bob", "bolo tie, bolo, bola tie, bola", "bonnet, poke bonnet", "bookcase", "bookshop, bookstore, bookstall", "bottlecap", "bow", "bow tie, bow-tie, bowtie", "brass, memorial tablet, plaque", "brassiere, bra, bandeau", "breakwater, groin, groyne, mole, bulwark, seawall, jetty", "breastplate, aegis, egis", "broom", "bucket, pail", "buckle", "bulletproof vest", "bullet train, bullet", "butcher shop, meat market", "cab, hack, taxi, taxicab", "caldron, cauldron", "candle, taper, wax light", "cannon", "canoe", "can opener, tin opener", "cardigan", "car mirror", "carousel, carrousel, merry-go-round, roundabout, whirligig", "carpenter's kit, tool kit", "carton", "car wheel", "cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, atm", "cassette", "cassette player", "castle", "catamaran", "cd player", "cello, violoncello", "cellular telephone, cellular phone, cellphone, cell, mobile phone", "chain", "chainlink fence", "chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour", "chain saw, chainsaw", "chest", "chiffonier, 
commode", "chime, bell, gong", "china cabinet, china closet", "christmas stocking", "church, church building", "cinema, movie theater, movie theatre, movie house, picture palace", "cleaver, meat cleaver, chopper", "cliff dwelling", "cloak", "clog, geta, patten, sabot", "cocktail shaker", "coffee mug", "coffeepot", "coil, spiral, volute, whorl, helix", "combination lock", "computer keyboard, keypad", "confectionery, confectionary, candy store", "container ship, containership, container vessel", "convertible", "corkscrew, bottle screw", "cornet, horn, trumpet, trump", "cowboy boot", "cowboy hat, ten-gallon hat", "cradle", "crane", "crash helmet", "crate", "crib, cot", "crock pot", "croquet ball", "crutch", "cuirass", "dam, dike, dyke", "desk", "desktop computer", "dial telephone, dial phone", "diaper, nappy, napkin", "digital clock", "digital watch", "dining table, board", "dishrag, dishcloth", "dishwasher, dish washer, dishwashing machine", "disk brake, disc brake", "dock, dockage, docking facility", "dogsled, dog sled, dog sleigh", "dome", "doormat, welcome mat", "drilling platform, offshore rig", "drum, membranophone, tympan", "drumstick", "dumbbell", "dutch oven", "electric fan, blower", "electric guitar", "electric locomotive", "entertainment center", "envelope", "espresso maker", "face powder", "feather boa, boa", "file, file cabinet, filing cabinet", "fireboat", "fire engine, fire truck", "fire screen, fireguard", "flagpole, flagstaff", "flute, transverse flute", "folding chair", "football helmet", "forklift", "fountain", "fountain pen", "four-poster", "freight car", "french horn, horn", "frying pan, frypan, skillet", "fur coat", "garbage truck, dustcart", "gasmask, respirator, gas helmet", "gas pump, gasoline pump, petrol pump, island dispenser", "goblet", "go-kart", "golf ball", "golfcart, golf cart", "gondola", "gong, tam-tam", "gown", "grand piano, grand", "greenhouse, nursery, glasshouse", "grille, radiator grille", "grocery store, grocery, food market, market", "guillotine", "hair slide", "hair spray", "half track", "hammer", "hamper", "hand blower, blow dryer, blow drier, hair dryer, hair drier", "hand-held computer, hand-held microcomputer", "handkerchief, hankie, hanky, hankey", "hard disc, hard disk, fixed disk", "harmonica, mouth organ, harp, mouth harp", "harp", "harvester, reaper", "hatchet", "holster", "home theater, home theatre", "honeycomb", "hook, claw", "hoopskirt, crinoline", "horizontal bar, high bar", "horse cart, horse-cart", "hourglass", "ipod", "iron, smoothing iron", "jack-o'-lantern", "jean, blue jean, denim", "jeep, landrover", "jersey, t-shirt, tee shirt", "jigsaw puzzle", "jinrikisha, ricksha, rickshaw", "joystick", "kimono", "knee pad", "knot", "lab coat, laboratory coat", "ladle", "lampshade, lamp shade", "laptop, laptop computer", "lawn mower, mower", "lens cap, lens cover", "letter opener, paper knife, paperknife", "library", "lifeboat", "lighter, light, igniter, ignitor", "limousine, limo", "liner, ocean liner", "lipstick, lip rouge", "loafer", "lotion", "loudspeaker, speaker, speaker unit, loudspeaker system, speaker system", "loupe, jeweler's loupe", "lumbermill, sawmill", "magnetic compass", "mailbag, postbag", "mailbox, letter box", "maillot", "maillot, tank suit", "manhole cover", "maraca", "marimba, xylophone", "mask", "matchstick", "maypole", "maze, labyrinth", "measuring cup", "medicine chest, medicine cabinet", "megalith, megalithic structure", "microphone, mike", "microwave, microwave oven", "military uniform", "milk can", "minibus", 
"miniskirt, mini", "minivan", "missile", "mitten", "mixing bowl", "mobile home, manufactured home", "model t", "modem", "monastery", "monitor", "moped", "mortar", "mortarboard", "mosque", "mosquito net", "motor scooter, scooter", "mountain bike, all-terrain bike, off-roader", "mountain tent", "mouse, computer mouse", "mousetrap", "moving van", "muzzle", "nail", "neck brace", "necklace", "nipple", "notebook, notebook computer", "obelisk", "oboe, hautboy, hautbois", "ocarina, sweet potato", "odometer, hodometer, mileometer, milometer", "oil filter", "organ, pipe organ", "oscilloscope, scope, cathode-ray oscilloscope, cro", "overskirt", "oxcart", "oxygen mask", "packet", "paddle, boat paddle", "paddlewheel, paddle wheel", "padlock", "paintbrush", "pajama, pyjama, pj's, jammies", "palace", "panpipe, pandean pipe, syrinx", "paper towel", "parachute, chute", "parallel bars, bars", "park bench", "parking meter", "passenger car, coach, carriage", "patio, terrace", "pay-phone, pay-station", "pedestal, plinth, footstall", "pencil box, pencil case", "pencil sharpener", "perfume, essence", "petri dish", "photocopier", "pick, plectrum, plectron", "pickelhaube", "picket fence, paling", "pickup, pickup truck", "pier", "piggy bank, penny bank", "pill bottle", "pillow", "ping-pong ball", "pinwheel", "pirate, pirate ship", "pitcher, ewer", "plane, carpenter's plane, woodworking plane", "planetarium", "plastic bag", "plate rack", "plow, plough", "plunger, plumber's helper", "polaroid camera, polaroid land camera", "pole", "police van, police wagon, paddy wagon, patrol wagon, wagon, black maria", "poncho", "pool table, billiard table, snooker table", "pop bottle, soda bottle", "pot, flowerpot", "potter's wheel", "power drill", "prayer rug, prayer mat", "printer", "prison, prison house", "projectile, missile", "projector", "puck, hockey puck", "punching bag, punch bag, punching ball, punchball", "purse", "quill, quill pen", "quilt, comforter, comfort, puff", "racer, race car, racing car", "racket, racquet", "radiator", "radio, wireless", "radio telescope, radio reflector", "rain barrel", "recreational vehicle, rv, r.v.", "reel", "reflex camera", "refrigerator, icebox", "remote control, remote", "restaurant, eating house, eating place, eatery", "revolver, six-gun, six-shooter", "rifle", "rocking chair, rocker", "rotisserie", "rubber eraser, rubber, pencil eraser", "rugby ball", "rule, ruler", "running shoe", "safe", "safety pin", "saltshaker, salt shaker", "sandal", "sarong", "sax, saxophone", "scabbard", "scale, weighing machine", "school bus", "schooner", "scoreboard", "screen, crt screen", "screw", "screwdriver", "seat belt, seatbelt", "sewing machine", "shield, buckler", "shoe shop, shoe-shop, shoe store", "shoji", "shopping basket", "shopping cart", "shovel", "shower cap", "shower curtain", "ski", "ski mask", "sleeping bag", "slide rule, slipstick", "sliding door", "slot, one-armed bandit", "snorkel", "snowmobile", "snowplow, snowplough", "soap dispenser", "soccer ball", "sock", "solar dish, solar collector, solar furnace", "sombrero", "soup bowl", "space bar", "space heater", "space shuttle", "spatula", "speedboat", "spider web, spider's web", "spindle", "sports car, sport car", "spotlight, spot", "stage", "steam locomotive", "steel arch bridge", "steel drum", "stethoscope", "stole", "stone wall", "stopwatch, stop watch", "stove", "strainer", "streetcar, tram, tramcar, trolley, trolley car", "stretcher", "studio couch, day bed", "stupa, tope", "submarine, pigboat, sub, u-boat", "suit, suit of clothes", 
"sundial", "sunglass", "sunglasses, dark glasses, shades", "sunscreen, sunblock, sun blocker", "suspension bridge", "swab, swob, mop", "sweatshirt", "swimming trunks, bathing trunks", "swing", "switch, electric switch, electrical switch", "syringe", "table lamp", "tank, army tank, armored combat vehicle, armoured combat vehicle", "tape player", "teapot", "teddy, teddy bear", "television, television system", "tennis ball", "thatch, thatched roof", "theater curtain, theatre curtain", "thimble", "thresher, thrasher, threshing machine", "throne", "tile roof", "toaster", "tobacco shop, tobacconist shop, tobacconist", "toilet seat", "torch", "totem pole", "tow truck, tow car, wrecker", "toyshop", "tractor", "trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi", "tray", "trench coat", "tricycle, trike, velocipede", "trimaran", "tripod", "triumphal arch", "trolleybus, trolley coach, trackless trolley", "trombone", "tub, vat", "turnstile", "typewriter keyboard", "umbrella", "unicycle, monocycle", "upright, upright piano", "vacuum, vacuum cleaner", "vase", "vault", "velvet", "vending machine", "vestment", "viaduct", "violin, fiddle", "volleyball", "waffle iron", "wall clock", "wallet, billfold, notecase, pocketbook", "wardrobe, closet, press", "warplane, military plane", "washbasin, handbasin, washbowl, lavabo, wash-hand basin", "washer, automatic washer, washing machine", "water bottle", "water jug", "water tower", "whiskey jug", "whistle", "wig", "window screen", "window shade", "windsor tie", "wine bottle", "wing", "wok", "wooden spoon", "wool, woolen, woollen", "worm fence, snake fence, snake-rail fence, virginia fence", "wreck", "yawl", "yurt", "web site, website, internet site, site", "comic book", "crossword puzzle, crossword", "street sign", "traffic light, traffic signal, stoplight", "book jacket, dust cover, dust jacket, dust wrapper", "menu", "plate", "guacamole", "consomme", "hot pot, hotpot", "trifle", "ice cream, icecream", "ice lolly, lolly, lollipop, popsicle", "french loaf", "bagel, beigel", "pretzel", "cheeseburger", "hotdog, hot dog, red hot", "mashed potato", "head cabbage", "broccoli", "cauliflower", "zucchini, courgette", "spaghetti squash", "acorn squash", "butternut squash", "cucumber, cuke", "artichoke, globe artichoke", "bell pepper", "cardoon", "mushroom", "granny smith", "strawberry", "orange", "lemon", "fig", "pineapple, ananas", "banana", "jackfruit, jak, jack", "custard apple", "pomegranate", "hay", "carbonara", "chocolate sauce, chocolate syrup", "dough", "meat loaf, meatloaf", "pizza, pizza pie", "potpie", "burrito", "red wine", "espresso", "cup", "eggnog", "alp", "bubble", "cliff, drop, drop-off", "coral reef", "geyser", "lakeside, lakeshore", "promontory, headland, head, foreland", "sandbar, sand bar", "seashore, coast, seacoast, sea-coast", "valley, vale", "volcano", "ballplayer, baseball player", "groom, bridegroom", "scuba diver", "rapeseed", "daisy", "yellow lady's slipper, yellow lady-slipper, cypripedium calceolus, cypripedium parviflorum", "corn", "acorn", "hip, rose hip, rosehip", "buckeye, horse chestnut, conker", "coral fungus", "agaric", "gyromitra", "stinkhorn, carrion fungus", "earthstar", "hen-of-the-woods, hen of the woods, polyporus frondosus, grifola frondosa", "bolete", "ear, spike, capitulum", "toilet tissue, toilet paper, bathroom tissue" ]
holylovenia/resnet50
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# resnet50

This model is a fine-tuned version of [microsoft/resnet-50](https://huggingface.co/microsoft/resnet-50) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 8.2042
- Accuracy: 0.0

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- gradient_accumulation_steps: 10
- total_train_batch_size: 320
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 5
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 6.8491 | 1.0 | 1 | 8.1612 | 0.0 |
| 6.7309 | 2.0 | 2 | 7.3393 | 0.0 |
| 6.8199 | 3.0 | 3 | 7.9807 | 0.0 |
| 6.8118 | 4.0 | 4 | 8.6801 | 0.0 |
| 6.7573 | 5.0 | 5 | 8.2042 | 0.0 |

### Framework versions

- Transformers 4.23.1
- Pytorch 1.12.1
- Datasets 2.6.0
- Tokenizers 0.13.1
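One optimizer step per epoch means the whole run applies only five weight updates, which is consistent with the flat 0.0 accuracy over the 1000-class label set below; the arithmetic behind "total_train_batch_size: 320" (single-device training is an assumption):

```python
per_device_train_batch_size = 32
gradient_accumulation_steps = 10
effective_batch = per_device_train_batch_size * gradient_accumulation_steps  # 320
optimizer_steps_total = 1 * 5  # 1 step/epoch × 5 epochs, from the table above
```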
[ "tench, tinca tinca", "goldfish, carassius auratus", "great white shark, white shark, man-eater, man-eating shark, carcharodon carcharias", "tiger shark, galeocerdo cuvieri", "hammerhead, hammerhead shark", "electric ray, crampfish, numbfish, torpedo", "stingray", "cock", "hen", "ostrich, struthio camelus", "brambling, fringilla montifringilla", "goldfinch, carduelis carduelis", "house finch, linnet, carpodacus mexicanus", "junco, snowbird", "indigo bunting, indigo finch, indigo bird, passerina cyanea", "robin, american robin, turdus migratorius", "bulbul", "jay", "magpie", "chickadee", "water ouzel, dipper", "kite", "bald eagle, american eagle, haliaeetus leucocephalus", "vulture", "great grey owl, great gray owl, strix nebulosa", "european fire salamander, salamandra salamandra", "common newt, triturus vulgaris", "eft", "spotted salamander, ambystoma maculatum", "axolotl, mud puppy, ambystoma mexicanum", "bullfrog, rana catesbeiana", "tree frog, tree-frog", "tailed frog, bell toad, ribbed toad, tailed toad, ascaphus trui", "loggerhead, loggerhead turtle, caretta caretta", "leatherback turtle, leatherback, leathery turtle, dermochelys coriacea", "mud turtle", "terrapin", "box turtle, box tortoise", "banded gecko", "common iguana, iguana, iguana iguana", "american chameleon, anole, anolis carolinensis", "whiptail, whiptail lizard", "agama", "frilled lizard, chlamydosaurus kingi", "alligator lizard", "gila monster, heloderma suspectum", "green lizard, lacerta viridis", "african chameleon, chamaeleo chamaeleon", "komodo dragon, komodo lizard, dragon lizard, giant lizard, varanus komodoensis", "african crocodile, nile crocodile, crocodylus niloticus", "american alligator, alligator mississipiensis", "triceratops", "thunder snake, worm snake, carphophis amoenus", "ringneck snake, ring-necked snake, ring snake", "hognose snake, puff adder, sand viper", "green snake, grass snake", "king snake, kingsnake", "garter snake, grass snake", "water snake", "vine snake", "night snake, hypsiglena torquata", "boa constrictor, constrictor constrictor", "rock python, rock snake, python sebae", "indian cobra, naja naja", "green mamba", "sea snake", "horned viper, cerastes, sand viper, horned asp, cerastes cornutus", "diamondback, diamondback rattlesnake, crotalus adamanteus", "sidewinder, horned rattlesnake, crotalus cerastes", "trilobite", "harvestman, daddy longlegs, phalangium opilio", "scorpion", "black and gold garden spider, argiope aurantia", "barn spider, araneus cavaticus", "garden spider, aranea diademata", "black widow, latrodectus mactans", "tarantula", "wolf spider, hunting spider", "tick", "centipede", "black grouse", "ptarmigan", "ruffed grouse, partridge, bonasa umbellus", "prairie chicken, prairie grouse, prairie fowl", "peacock", "quail", "partridge", "african grey, african gray, psittacus erithacus", "macaw", "sulphur-crested cockatoo, kakatoe galerita, cacatua galerita", "lorikeet", "coucal", "bee eater", "hornbill", "hummingbird", "jacamar", "toucan", "drake", "red-breasted merganser, mergus serrator", "goose", "black swan, cygnus atratus", "tusker", "echidna, spiny anteater, anteater", "platypus, duckbill, duckbilled platypus, duck-billed platypus, ornithorhynchus anatinus", "wallaby, brush kangaroo", "koala, koala bear, kangaroo bear, native bear, phascolarctos cinereus", "wombat", "jellyfish", "sea anemone, anemone", "brain coral", "flatworm, platyhelminth", "nematode, nematode worm, roundworm", "conch", "snail", "slug", "sea slug, nudibranch", "chiton, coat-of-mail shell, sea 
cradle, polyplacophore", "chambered nautilus, pearly nautilus, nautilus", "dungeness crab, cancer magister", "rock crab, cancer irroratus", "fiddler crab", "king crab, alaska crab, alaskan king crab, alaska king crab, paralithodes camtschatica", "american lobster, northern lobster, maine lobster, homarus americanus", "spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish", "crayfish, crawfish, crawdad, crawdaddy", "hermit crab", "isopod", "white stork, ciconia ciconia", "black stork, ciconia nigra", "spoonbill", "flamingo", "little blue heron, egretta caerulea", "american egret, great white heron, egretta albus", "bittern", "crane", "limpkin, aramus pictus", "european gallinule, porphyrio porphyrio", "american coot, marsh hen, mud hen, water hen, fulica americana", "bustard", "ruddy turnstone, arenaria interpres", "red-backed sandpiper, dunlin, erolia alpina", "redshank, tringa totanus", "dowitcher", "oystercatcher, oyster catcher", "pelican", "king penguin, aptenodytes patagonica", "albatross, mollymawk", "grey whale, gray whale, devilfish, eschrichtius gibbosus, eschrichtius robustus", "killer whale, killer, orca, grampus, sea wolf, orcinus orca", "dugong, dugong dugon", "sea lion", "chihuahua", "japanese spaniel", "maltese dog, maltese terrier, maltese", "pekinese, pekingese, peke", "shih-tzu", "blenheim spaniel", "papillon", "toy terrier", "rhodesian ridgeback", "afghan hound, afghan", "basset, basset hound", "beagle", "bloodhound, sleuthhound", "bluetick", "black-and-tan coonhound", "walker hound, walker foxhound", "english foxhound", "redbone", "borzoi, russian wolfhound", "irish wolfhound", "italian greyhound", "whippet", "ibizan hound, ibizan podenco", "norwegian elkhound, elkhound", "otterhound, otter hound", "saluki, gazelle hound", "scottish deerhound, deerhound", "weimaraner", "staffordshire bullterrier, staffordshire bull terrier", "american staffordshire terrier, staffordshire terrier, american pit bull terrier, pit bull terrier", "bedlington terrier", "border terrier", "kerry blue terrier", "irish terrier", "norfolk terrier", "norwich terrier", "yorkshire terrier", "wire-haired fox terrier", "lakeland terrier", "sealyham terrier, sealyham", "airedale, airedale terrier", "cairn, cairn terrier", "australian terrier", "dandie dinmont, dandie dinmont terrier", "boston bull, boston terrier", "miniature schnauzer", "giant schnauzer", "standard schnauzer", "scotch terrier, scottish terrier, scottie", "tibetan terrier, chrysanthemum dog", "silky terrier, sydney silky", "soft-coated wheaten terrier", "west highland white terrier", "lhasa, lhasa apso", "flat-coated retriever", "curly-coated retriever", "golden retriever", "labrador retriever", "chesapeake bay retriever", "german short-haired pointer", "vizsla, hungarian pointer", "english setter", "irish setter, red setter", "gordon setter", "brittany spaniel", "clumber, clumber spaniel", "english springer, english springer spaniel", "welsh springer spaniel", "cocker spaniel, english cocker spaniel, cocker", "sussex spaniel", "irish water spaniel", "kuvasz", "schipperke", "groenendael", "malinois", "briard", "kelpie", "komondor", "old english sheepdog, bobtail", "shetland sheepdog, shetland sheep dog, shetland", "collie", "border collie", "bouvier des flandres, bouviers des flandres", "rottweiler", "german shepherd, german shepherd dog, german police dog, alsatian", "doberman, doberman pinscher", "miniature pinscher", "greater swiss mountain dog", "bernese mountain dog", "appenzeller", "entlebucher", "boxer", "bull 
mastiff", "tibetan mastiff", "french bulldog", "great dane", "saint bernard, st bernard", "eskimo dog, husky", "malamute, malemute, alaskan malamute", "siberian husky", "dalmatian, coach dog, carriage dog", "affenpinscher, monkey pinscher, monkey dog", "basenji", "pug, pug-dog", "leonberg", "newfoundland, newfoundland dog", "great pyrenees", "samoyed, samoyede", "pomeranian", "chow, chow chow", "keeshond", "brabancon griffon", "pembroke, pembroke welsh corgi", "cardigan, cardigan welsh corgi", "toy poodle", "miniature poodle", "standard poodle", "mexican hairless", "timber wolf, grey wolf, gray wolf, canis lupus", "white wolf, arctic wolf, canis lupus tundrarum", "red wolf, maned wolf, canis rufus, canis niger", "coyote, prairie wolf, brush wolf, canis latrans", "dingo, warrigal, warragal, canis dingo", "dhole, cuon alpinus", "african hunting dog, hyena dog, cape hunting dog, lycaon pictus", "hyena, hyaena", "red fox, vulpes vulpes", "kit fox, vulpes macrotis", "arctic fox, white fox, alopex lagopus", "grey fox, gray fox, urocyon cinereoargenteus", "tabby, tabby cat", "tiger cat", "persian cat", "siamese cat, siamese", "egyptian cat", "cougar, puma, catamount, mountain lion, painter, panther, felis concolor", "lynx, catamount", "leopard, panthera pardus", "snow leopard, ounce, panthera uncia", "jaguar, panther, panthera onca, felis onca", "lion, king of beasts, panthera leo", "tiger, panthera tigris", "cheetah, chetah, acinonyx jubatus", "brown bear, bruin, ursus arctos", "american black bear, black bear, ursus americanus, euarctos americanus", "ice bear, polar bear, ursus maritimus, thalarctos maritimus", "sloth bear, melursus ursinus, ursus ursinus", "mongoose", "meerkat, mierkat", "tiger beetle", "ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle", "ground beetle, carabid beetle", "long-horned beetle, longicorn, longicorn beetle", "leaf beetle, chrysomelid", "dung beetle", "rhinoceros beetle", "weevil", "fly", "bee", "ant, emmet, pismire", "grasshopper, hopper", "cricket", "walking stick, walkingstick, stick insect", "cockroach, roach", "mantis, mantid", "cicada, cicala", "leafhopper", "lacewing, lacewing fly", "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk", "damselfly", "admiral", "ringlet, ringlet butterfly", "monarch, monarch butterfly, milkweed butterfly, danaus plexippus", "cabbage butterfly", "sulphur butterfly, sulfur butterfly", "lycaenid, lycaenid butterfly", "starfish, sea star", "sea urchin", "sea cucumber, holothurian", "wood rabbit, cottontail, cottontail rabbit", "hare", "angora, angora rabbit", "hamster", "porcupine, hedgehog", "fox squirrel, eastern fox squirrel, sciurus niger", "marmot", "beaver", "guinea pig, cavia cobaya", "sorrel", "zebra", "hog, pig, grunter, squealer, sus scrofa", "wild boar, boar, sus scrofa", "warthog", "hippopotamus, hippo, river horse, hippopotamus amphibius", "ox", "water buffalo, water ox, asiatic buffalo, bubalus bubalis", "bison", "ram, tup", "bighorn, bighorn sheep, cimarron, rocky mountain bighorn, rocky mountain sheep, ovis canadensis", "ibex, capra ibex", "hartebeest", "impala, aepyceros melampus", "gazelle", "arabian camel, dromedary, camelus dromedarius", "llama", "weasel", "mink", "polecat, fitch, foulmart, foumart, mustela putorius", "black-footed ferret, ferret, mustela nigripes", "otter", "skunk, polecat, wood pussy", "badger", "armadillo", "three-toed sloth, ai, bradypus tridactylus", "orangutan, orang, orangutang, pongo pygmaeus", "gorilla, 
gorilla gorilla", "chimpanzee, chimp, pan troglodytes", "gibbon, hylobates lar", "siamang, hylobates syndactylus, symphalangus syndactylus", "guenon, guenon monkey", "patas, hussar monkey, erythrocebus patas", "baboon", "macaque", "langur", "colobus, colobus monkey", "proboscis monkey, nasalis larvatus", "marmoset", "capuchin, ringtail, cebus capucinus", "howler monkey, howler", "titi, titi monkey", "spider monkey, ateles geoffroyi", "squirrel monkey, saimiri sciureus", "madagascar cat, ring-tailed lemur, lemur catta", "indri, indris, indri indri, indri brevicaudatus", "indian elephant, elephas maximus", "african elephant, loxodonta africana", "lesser panda, red panda, panda, bear cat, cat bear, ailurus fulgens", "giant panda, panda, panda bear, coon bear, ailuropoda melanoleuca", "barracouta, snoek", "eel", "coho, cohoe, coho salmon, blue jack, silver salmon, oncorhynchus kisutch", "rock beauty, holocanthus tricolor", "anemone fish", "sturgeon", "gar, garfish, garpike, billfish, lepisosteus osseus", "lionfish", "puffer, pufferfish, blowfish, globefish", "abacus", "abaya", "academic gown, academic robe, judge's robe", "accordion, piano accordion, squeeze box", "acoustic guitar", "aircraft carrier, carrier, flattop, attack aircraft carrier", "airliner", "airship, dirigible", "altar", "ambulance", "amphibian, amphibious vehicle", "analog clock", "apiary, bee house", "apron", "ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin", "assault rifle, assault gun", "backpack, back pack, knapsack, packsack, rucksack, haversack", "bakery, bakeshop, bakehouse", "balance beam, beam", "balloon", "ballpoint, ballpoint pen, ballpen, biro", "band aid", "banjo", "bannister, banister, balustrade, balusters, handrail", "barbell", "barber chair", "barbershop", "barn", "barometer", "barrel, cask", "barrow, garden cart, lawn cart, wheelbarrow", "baseball", "basketball", "bassinet", "bassoon", "bathing cap, swimming cap", "bath towel", "bathtub, bathing tub, bath, tub", "beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon", "beacon, lighthouse, beacon light, pharos", "beaker", "bearskin, busby, shako", "beer bottle", "beer glass", "bell cote, bell cot", "bib", "bicycle-built-for-two, tandem bicycle, tandem", "bikini, two-piece", "binder, ring-binder", "binoculars, field glasses, opera glasses", "birdhouse", "boathouse", "bobsled, bobsleigh, bob", "bolo tie, bolo, bola tie, bola", "bonnet, poke bonnet", "bookcase", "bookshop, bookstore, bookstall", "bottlecap", "bow", "bow tie, bow-tie, bowtie", "brass, memorial tablet, plaque", "brassiere, bra, bandeau", "breakwater, groin, groyne, mole, bulwark, seawall, jetty", "breastplate, aegis, egis", "broom", "bucket, pail", "buckle", "bulletproof vest", "bullet train, bullet", "butcher shop, meat market", "cab, hack, taxi, taxicab", "caldron, cauldron", "candle, taper, wax light", "cannon", "canoe", "can opener, tin opener", "cardigan", "car mirror", "carousel, carrousel, merry-go-round, roundabout, whirligig", "carpenter's kit, tool kit", "carton", "car wheel", "cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, atm", "cassette", "cassette player", "castle", "catamaran", "cd player", "cello, violoncello", "cellular telephone, cellular phone, cellphone, cell, mobile phone", "chain", "chainlink fence", "chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour", "chain saw, chainsaw", "chest", "chiffonier, 
commode", "chime, bell, gong", "china cabinet, china closet", "christmas stocking", "church, church building", "cinema, movie theater, movie theatre, movie house, picture palace", "cleaver, meat cleaver, chopper", "cliff dwelling", "cloak", "clog, geta, patten, sabot", "cocktail shaker", "coffee mug", "coffeepot", "coil, spiral, volute, whorl, helix", "combination lock", "computer keyboard, keypad", "confectionery, confectionary, candy store", "container ship, containership, container vessel", "convertible", "corkscrew, bottle screw", "cornet, horn, trumpet, trump", "cowboy boot", "cowboy hat, ten-gallon hat", "cradle", "crane", "crash helmet", "crate", "crib, cot", "crock pot", "croquet ball", "crutch", "cuirass", "dam, dike, dyke", "desk", "desktop computer", "dial telephone, dial phone", "diaper, nappy, napkin", "digital clock", "digital watch", "dining table, board", "dishrag, dishcloth", "dishwasher, dish washer, dishwashing machine", "disk brake, disc brake", "dock, dockage, docking facility", "dogsled, dog sled, dog sleigh", "dome", "doormat, welcome mat", "drilling platform, offshore rig", "drum, membranophone, tympan", "drumstick", "dumbbell", "dutch oven", "electric fan, blower", "electric guitar", "electric locomotive", "entertainment center", "envelope", "espresso maker", "face powder", "feather boa, boa", "file, file cabinet, filing cabinet", "fireboat", "fire engine, fire truck", "fire screen, fireguard", "flagpole, flagstaff", "flute, transverse flute", "folding chair", "football helmet", "forklift", "fountain", "fountain pen", "four-poster", "freight car", "french horn, horn", "frying pan, frypan, skillet", "fur coat", "garbage truck, dustcart", "gasmask, respirator, gas helmet", "gas pump, gasoline pump, petrol pump, island dispenser", "goblet", "go-kart", "golf ball", "golfcart, golf cart", "gondola", "gong, tam-tam", "gown", "grand piano, grand", "greenhouse, nursery, glasshouse", "grille, radiator grille", "grocery store, grocery, food market, market", "guillotine", "hair slide", "hair spray", "half track", "hammer", "hamper", "hand blower, blow dryer, blow drier, hair dryer, hair drier", "hand-held computer, hand-held microcomputer", "handkerchief, hankie, hanky, hankey", "hard disc, hard disk, fixed disk", "harmonica, mouth organ, harp, mouth harp", "harp", "harvester, reaper", "hatchet", "holster", "home theater, home theatre", "honeycomb", "hook, claw", "hoopskirt, crinoline", "horizontal bar, high bar", "horse cart, horse-cart", "hourglass", "ipod", "iron, smoothing iron", "jack-o'-lantern", "jean, blue jean, denim", "jeep, landrover", "jersey, t-shirt, tee shirt", "jigsaw puzzle", "jinrikisha, ricksha, rickshaw", "joystick", "kimono", "knee pad", "knot", "lab coat, laboratory coat", "ladle", "lampshade, lamp shade", "laptop, laptop computer", "lawn mower, mower", "lens cap, lens cover", "letter opener, paper knife, paperknife", "library", "lifeboat", "lighter, light, igniter, ignitor", "limousine, limo", "liner, ocean liner", "lipstick, lip rouge", "loafer", "lotion", "loudspeaker, speaker, speaker unit, loudspeaker system, speaker system", "loupe, jeweler's loupe", "lumbermill, sawmill", "magnetic compass", "mailbag, postbag", "mailbox, letter box", "maillot", "maillot, tank suit", "manhole cover", "maraca", "marimba, xylophone", "mask", "matchstick", "maypole", "maze, labyrinth", "measuring cup", "medicine chest, medicine cabinet", "megalith, megalithic structure", "microphone, mike", "microwave, microwave oven", "military uniform", "milk can", "minibus", 
"miniskirt, mini", "minivan", "missile", "mitten", "mixing bowl", "mobile home, manufactured home", "model t", "modem", "monastery", "monitor", "moped", "mortar", "mortarboard", "mosque", "mosquito net", "motor scooter, scooter", "mountain bike, all-terrain bike, off-roader", "mountain tent", "mouse, computer mouse", "mousetrap", "moving van", "muzzle", "nail", "neck brace", "necklace", "nipple", "notebook, notebook computer", "obelisk", "oboe, hautboy, hautbois", "ocarina, sweet potato", "odometer, hodometer, mileometer, milometer", "oil filter", "organ, pipe organ", "oscilloscope, scope, cathode-ray oscilloscope, cro", "overskirt", "oxcart", "oxygen mask", "packet", "paddle, boat paddle", "paddlewheel, paddle wheel", "padlock", "paintbrush", "pajama, pyjama, pj's, jammies", "palace", "panpipe, pandean pipe, syrinx", "paper towel", "parachute, chute", "parallel bars, bars", "park bench", "parking meter", "passenger car, coach, carriage", "patio, terrace", "pay-phone, pay-station", "pedestal, plinth, footstall", "pencil box, pencil case", "pencil sharpener", "perfume, essence", "petri dish", "photocopier", "pick, plectrum, plectron", "pickelhaube", "picket fence, paling", "pickup, pickup truck", "pier", "piggy bank, penny bank", "pill bottle", "pillow", "ping-pong ball", "pinwheel", "pirate, pirate ship", "pitcher, ewer", "plane, carpenter's plane, woodworking plane", "planetarium", "plastic bag", "plate rack", "plow, plough", "plunger, plumber's helper", "polaroid camera, polaroid land camera", "pole", "police van, police wagon, paddy wagon, patrol wagon, wagon, black maria", "poncho", "pool table, billiard table, snooker table", "pop bottle, soda bottle", "pot, flowerpot", "potter's wheel", "power drill", "prayer rug, prayer mat", "printer", "prison, prison house", "projectile, missile", "projector", "puck, hockey puck", "punching bag, punch bag, punching ball, punchball", "purse", "quill, quill pen", "quilt, comforter, comfort, puff", "racer, race car, racing car", "racket, racquet", "radiator", "radio, wireless", "radio telescope, radio reflector", "rain barrel", "recreational vehicle, rv, r.v.", "reel", "reflex camera", "refrigerator, icebox", "remote control, remote", "restaurant, eating house, eating place, eatery", "revolver, six-gun, six-shooter", "rifle", "rocking chair, rocker", "rotisserie", "rubber eraser, rubber, pencil eraser", "rugby ball", "rule, ruler", "running shoe", "safe", "safety pin", "saltshaker, salt shaker", "sandal", "sarong", "sax, saxophone", "scabbard", "scale, weighing machine", "school bus", "schooner", "scoreboard", "screen, crt screen", "screw", "screwdriver", "seat belt, seatbelt", "sewing machine", "shield, buckler", "shoe shop, shoe-shop, shoe store", "shoji", "shopping basket", "shopping cart", "shovel", "shower cap", "shower curtain", "ski", "ski mask", "sleeping bag", "slide rule, slipstick", "sliding door", "slot, one-armed bandit", "snorkel", "snowmobile", "snowplow, snowplough", "soap dispenser", "soccer ball", "sock", "solar dish, solar collector, solar furnace", "sombrero", "soup bowl", "space bar", "space heater", "space shuttle", "spatula", "speedboat", "spider web, spider's web", "spindle", "sports car, sport car", "spotlight, spot", "stage", "steam locomotive", "steel arch bridge", "steel drum", "stethoscope", "stole", "stone wall", "stopwatch, stop watch", "stove", "strainer", "streetcar, tram, tramcar, trolley, trolley car", "stretcher", "studio couch, day bed", "stupa, tope", "submarine, pigboat, sub, u-boat", "suit, suit of clothes", 
"sundial", "sunglass", "sunglasses, dark glasses, shades", "sunscreen, sunblock, sun blocker", "suspension bridge", "swab, swob, mop", "sweatshirt", "swimming trunks, bathing trunks", "swing", "switch, electric switch, electrical switch", "syringe", "table lamp", "tank, army tank, armored combat vehicle, armoured combat vehicle", "tape player", "teapot", "teddy, teddy bear", "television, television system", "tennis ball", "thatch, thatched roof", "theater curtain, theatre curtain", "thimble", "thresher, thrasher, threshing machine", "throne", "tile roof", "toaster", "tobacco shop, tobacconist shop, tobacconist", "toilet seat", "torch", "totem pole", "tow truck, tow car, wrecker", "toyshop", "tractor", "trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi", "tray", "trench coat", "tricycle, trike, velocipede", "trimaran", "tripod", "triumphal arch", "trolleybus, trolley coach, trackless trolley", "trombone", "tub, vat", "turnstile", "typewriter keyboard", "umbrella", "unicycle, monocycle", "upright, upright piano", "vacuum, vacuum cleaner", "vase", "vault", "velvet", "vending machine", "vestment", "viaduct", "violin, fiddle", "volleyball", "waffle iron", "wall clock", "wallet, billfold, notecase, pocketbook", "wardrobe, closet, press", "warplane, military plane", "washbasin, handbasin, washbowl, lavabo, wash-hand basin", "washer, automatic washer, washing machine", "water bottle", "water jug", "water tower", "whiskey jug", "whistle", "wig", "window screen", "window shade", "windsor tie", "wine bottle", "wing", "wok", "wooden spoon", "wool, woolen, woollen", "worm fence, snake fence, snake-rail fence, virginia fence", "wreck", "yawl", "yurt", "web site, website, internet site, site", "comic book", "crossword puzzle, crossword", "street sign", "traffic light, traffic signal, stoplight", "book jacket, dust cover, dust jacket, dust wrapper", "menu", "plate", "guacamole", "consomme", "hot pot, hotpot", "trifle", "ice cream, icecream", "ice lolly, lolly, lollipop, popsicle", "french loaf", "bagel, beigel", "pretzel", "cheeseburger", "hotdog, hot dog, red hot", "mashed potato", "head cabbage", "broccoli", "cauliflower", "zucchini, courgette", "spaghetti squash", "acorn squash", "butternut squash", "cucumber, cuke", "artichoke, globe artichoke", "bell pepper", "cardoon", "mushroom", "granny smith", "strawberry", "orange", "lemon", "fig", "pineapple, ananas", "banana", "jackfruit, jak, jack", "custard apple", "pomegranate", "hay", "carbonara", "chocolate sauce, chocolate syrup", "dough", "meat loaf, meatloaf", "pizza, pizza pie", "potpie", "burrito", "red wine", "espresso", "cup", "eggnog", "alp", "bubble", "cliff, drop, drop-off", "coral reef", "geyser", "lakeside, lakeshore", "promontory, headland, head, foreland", "sandbar, sand bar", "seashore, coast, seacoast, sea-coast", "valley, vale", "volcano", "ballplayer, baseball player", "groom, bridegroom", "scuba diver", "rapeseed", "daisy", "yellow lady's slipper, yellow lady-slipper, cypripedium calceolus, cypripedium parviflorum", "corn", "acorn", "hip, rose hip, rosehip", "buckeye, horse chestnut, conker", "coral fungus", "agaric", "gyromitra", "stinkhorn, carrion fungus", "earthstar", "hen-of-the-woods, hen of the woods, polyporus frondosus, grifola frondosa", "bolete", "ear, spike, capitulum", "toilet tissue, toilet paper, bathroom tissue" ]
Alex-VisTas/swin-tiny-patch4-window7-224-finetuned-woody_90epochs
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-woody_90epochs This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.4351 - Accuracy: 0.8424 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 90 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.6659 | 1.0 | 58 | 0.6216 | 0.6558 | | 0.6181 | 2.0 | 116 | 0.5616 | 0.7115 | | 0.5941 | 3.0 | 174 | 0.5464 | 0.7224 | | 0.5727 | 4.0 | 232 | 0.5368 | 0.7297 | | 0.573 | 5.0 | 290 | 0.4971 | 0.7539 | | 0.5724 | 6.0 | 348 | 0.4920 | 0.7467 | | 0.5584 | 7.0 | 406 | 0.4949 | 0.7564 | | 0.5352 | 8.0 | 464 | 0.5255 | 0.7406 | | 0.5857 | 9.0 | 522 | 0.4954 | 0.7515 | | 0.5352 | 10.0 | 580 | 0.4888 | 0.7455 | | 0.5161 | 11.0 | 638 | 0.5306 | 0.7224 | | 0.5457 | 12.0 | 696 | 0.4856 | 0.76 | | 0.5309 | 13.0 | 754 | 0.4647 | 0.7612 | | 0.5357 | 14.0 | 812 | 0.4688 | 0.7697 | | 0.5183 | 15.0 | 870 | 0.4830 | 0.7527 | | 0.4837 | 16.0 | 928 | 0.5238 | 0.7370 | | 0.51 | 17.0 | 986 | 0.4658 | 0.7745 | | 0.533 | 18.0 | 1044 | 0.4589 | 0.7673 | | 0.4808 | 19.0 | 1102 | 0.4375 | 0.7794 | | 0.4854 | 20.0 | 1160 | 0.4574 | 0.7745 | | 0.4708 | 21.0 | 1218 | 0.4738 | 0.7709 | | 0.4801 | 22.0 | 1276 | 0.4688 | 0.76 | | 0.4751 | 23.0 | 1334 | 0.4610 | 0.7648 | | 0.497 | 24.0 | 1392 | 0.5058 | 0.7624 | | 0.4767 | 25.0 | 1450 | 0.4709 | 0.7721 | | 0.4805 | 26.0 | 1508 | 0.4447 | 0.7697 | | 0.4557 | 27.0 | 1566 | 0.4558 | 0.7721 | | 0.4636 | 28.0 | 1624 | 0.4325 | 0.8036 | | 0.4285 | 29.0 | 1682 | 0.4526 | 0.7794 | | 0.4358 | 30.0 | 1740 | 0.4302 | 0.8048 | | 0.4257 | 31.0 | 1798 | 0.4373 | 0.7927 | | 0.4137 | 32.0 | 1856 | 0.4458 | 0.7903 | | 0.4389 | 33.0 | 1914 | 0.4522 | 0.7988 | | 0.4537 | 34.0 | 1972 | 0.4395 | 0.7927 | | 0.4249 | 35.0 | 2030 | 0.4348 | 0.8 | | 0.4244 | 36.0 | 2088 | 0.4650 | 0.7867 | | 0.4256 | 37.0 | 2146 | 0.4402 | 0.8012 | | 0.4118 | 38.0 | 2204 | 0.4394 | 0.7867 | | 0.4128 | 39.0 | 2262 | 0.4225 | 0.8133 | | 0.416 | 40.0 | 2320 | 0.4410 | 0.8073 | | 0.4211 | 41.0 | 2378 | 0.4464 | 0.8024 | | 0.3838 | 42.0 | 2436 | 0.4440 | 0.7976 | | 0.374 | 43.0 | 2494 | 0.4175 | 0.7903 | | 0.412 | 44.0 | 2552 | 0.4169 | 0.8109 | | 0.3746 | 45.0 | 2610 | 0.4243 | 0.8012 | | 0.3719 | 46.0 | 2668 | 0.4132 | 0.8242 | | 0.381 | 47.0 | 2726 | 0.4485 | 0.7988 | | 0.3708 | 48.0 | 2784 | 0.4200 | 0.8085 | | 0.3591 | 49.0 | 2842 | 0.4071 | 0.8279 | | 0.3762 | 50.0 | 2900 | 0.4428 | 0.8145 | | 0.3426 | 51.0 | 2958 | 0.4058 | 0.8158 | | 0.3541 | 52.0 | 3016 | 0.4470 | 0.8182 | | 0.3373 | 53.0 | 3074 | 0.4252 | 0.8194 | | 0.3303 | 54.0 | 3132 | 0.4040 | 0.8315 | | 0.3275 | 55.0 | 3190 | 0.4235 | 0.8291 | | 0.3151 | 56.0 | 3248 | 0.3984 | 0.8485 
| 0.324 | 57.0 | 3306 | 0.4283 | 0.8291 |
| 0.3276 | 58.0 | 3364 | 0.4731 | 0.8145 |
| 0.3208 | 59.0 | 3422 | 0.4360 | 0.8255 |
| 0.3355 | 60.0 | 3480 | 0.4143 | 0.8230 |
| 0.3154 | 61.0 | 3538 | 0.4234 | 0.8267 |
| 0.3451 | 62.0 | 3596 | 0.4059 | 0.8242 |
| 0.3071 | 63.0 | 3654 | 0.3991 | 0.8267 |
| 0.3303 | 64.0 | 3712 | 0.4099 | 0.8242 |
| 0.29 | 65.0 | 3770 | 0.4140 | 0.8327 |
| 0.2937 | 66.0 | 3828 | 0.4590 | 0.8218 |
| 0.3322 | 67.0 | 3886 | 0.4111 | 0.8327 |
| 0.3219 | 68.0 | 3944 | 0.4299 | 0.8327 |
| 0.2839 | 69.0 | 4002 | 0.4074 | 0.8424 |
| 0.2903 | 70.0 | 4060 | 0.4366 | 0.8315 |
| 0.2851 | 71.0 | 4118 | 0.4132 | 0.8473 |
| 0.3029 | 72.0 | 4176 | 0.4239 | 0.8473 |
| 0.2693 | 73.0 | 4234 | 0.4194 | 0.8412 |
| 0.2715 | 74.0 | 4292 | 0.4384 | 0.8412 |
| 0.2842 | 75.0 | 4350 | 0.4279 | 0.8448 |
| 0.2733 | 76.0 | 4408 | 0.4174 | 0.84 |
| 0.2694 | 77.0 | 4466 | 0.3966 | 0.8388 |
| 0.2527 | 78.0 | 4524 | 0.4194 | 0.8364 |
| 0.2813 | 79.0 | 4582 | 0.4231 | 0.8436 |
| 0.2618 | 80.0 | 4640 | 0.4494 | 0.8352 |
| 0.2639 | 81.0 | 4698 | 0.4152 | 0.8388 |
| 0.2643 | 82.0 | 4756 | 0.4241 | 0.8448 |
| 0.276 | 83.0 | 4814 | 0.4518 | 0.8327 |
| 0.2761 | 84.0 | 4872 | 0.4349 | 0.8412 |
| 0.2295 | 85.0 | 4930 | 0.4504 | 0.8315 |
| 0.2723 | 86.0 | 4988 | 0.4385 | 0.8388 |
| 0.2559 | 87.0 | 5046 | 0.4362 | 0.8473 |
| 0.2583 | 88.0 | 5104 | 0.4273 | 0.8436 |
| 0.2523 | 89.0 | 5162 | 0.4292 | 0.8424 |
| 0.2563 | 90.0 | 5220 | 0.4351 | 0.8424 |

### Framework versions

- Transformers 4.23.1
- Pytorch 1.12.1+cu113
- Datasets 2.6.1
- Tokenizers 0.13.1
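The card stops at training details; a minimal inference sketch follows (assumptions: the checkpoint is published on the Hub under the repo id above, and `wood_sample.jpg` is a hypothetical local image).

```python
# Minimal inference sketch; repo id taken from the card header,
# "wood_sample.jpg" is a hypothetical local image file.
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="Alex-VisTas/swin-tiny-patch4-window7-224-finetuned-woody_90epochs",
)
# Returns a list like [{"label": "woody", "score": 0.97}, ...]
print(classifier("wood_sample.jpg"))
```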
[ "normal", "woody" ]
jayantapaul888/vit-base-patch16-224-finetuned-memes-v2
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-patch16-224-finetuned-memes-v2 This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.4096 - Accuracy: 0.8377 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.00012 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.8643 | 0.99 | 20 | 0.6406 | 0.7720 | | 0.4279 | 1.99 | 40 | 0.4885 | 0.8130 | | 0.2272 | 2.99 | 60 | 0.4224 | 0.8331 | | 0.1483 | 3.99 | 80 | 0.4096 | 0.8377 | ### Framework versions - Transformers 4.24.0.dev0 - Pytorch 1.11.0+cu102 - Datasets 2.6.1.dev0 - Tokenizers 0.13.1
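For readers who prefer explicit preprocessing over the pipeline helper, a hedged sketch of manual inference with this checkpoint follows (the repo id is taken from the header; `meme.png` is a hypothetical file. `AutoImageProcessor` requires a newer Transformers release than the 4.24 pre-release pinned above; `AutoFeatureExtractor` is the period equivalent).

```python
# Hedged manual-inference sketch; "meme.png" is a hypothetical input.
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

ckpt = "jayantapaul888/vit-base-patch16-224-finetuned-memes-v2"
processor = AutoImageProcessor.from_pretrained(ckpt)
model = AutoModelForImageClassification.from_pretrained(ckpt)

image = Image.open("meme.png").convert("RGB")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Map the highest-scoring logit back to its class name.
probs = logits.softmax(dim=-1)[0]
print(model.config.id2label[int(probs.argmax())], float(probs.max()))
```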
[ "bollywood memes", "industrialist", "political memes", "singer memes", "sports memes" ]
ezzouhri/vit-base-patch16-224-in21k-finetuned-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-patch16-224-in21k-finetuned-eurosat This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset. It achieves the following results on the evaluation set: - eval_loss: 0.2695 - eval_accuracy: 0.9022 - eval_runtime: 195.5267 - eval_samples_per_second: 21.486 - eval_steps_per_second: 0.675 - epoch: 51.76 - step: 10196 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 200 ### Framework versions - Transformers 4.20.1 - Pytorch 1.12.1+cu102 - Datasets 2.3.2 - Tokenizers 0.12.1
[ "c0", "c1", "c2", "c3", "c4", "c5", "c6", "c7", "c8", "c9" ]
micole66/autotrain-animals-1797562141
# Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 1797562141 - CO2 Emissions (in grams): 0.6999 ## Validation Metrics - Loss: 0.096 - Accuracy: 1.000 - Precision: 1.000 - Recall: 1.000 - AUC: 1.000 - F1: 1.000
[ "other characters", "pachyderms" ]
jafdxc/vit-base-patch16-224-finetuned-flower
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-patch16-224-finetuned-flower This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the imagefolder dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results ### Framework versions - Transformers 4.24.0 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
[ "daisy", "dandelion", "roses", "sunflowers", "tulips" ]
jayantapaul888/vit-base-patch16-224-finetuned-memes-v3
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-patch16-224-finetuned-memes-v3 This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.3862 - Accuracy: 0.8478 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.00012 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.5649 | 0.99 | 40 | 0.6342 | 0.7488 | | 0.3083 | 1.99 | 80 | 0.4146 | 0.8423 | | 0.1563 | 2.99 | 120 | 0.3900 | 0.8547 | | 0.0827 | 3.99 | 160 | 0.3862 | 0.8478 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
[ "bollywood memes", "industrialist", "political memes", "singer memes", "sports memes" ]
micole66/autotrain-mercuryorsodium-1804662320
# Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 1804662320 - CO2 Emissions (in grams): 0.3398 ## Validation Metrics - Loss: 0.186 - Accuracy: 1.000 - Precision: 1.000 - Recall: 1.000 - AUC: 1.000 - F1: 1.000
[ "high pressure sodium", "mercury vapor" ]
platzi/platzi-vit-model-yeder-lvicente
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # platzi-vit-model-yeder-lvicente This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.0077 - Accuracy: 1.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.0084 | 3.85 | 500 | 0.0077 | 1.0 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
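A quick sanity check against the beans dataset itself might look like the sketch below (assumptions: the checkpoint is public under the repo id above, and the Hub copy of the `beans` dataset exposes its `labels` column as a ClassLabel).

```python
# Hedged sketch: compare one validation example's true label against
# the model's prediction.
from datasets import load_dataset
from transformers import pipeline

ds = load_dataset("beans", split="validation")
clf = pipeline("image-classification", model="platzi/platzi-vit-model-yeder-lvicente")

sample = ds[0]
print("true:", ds.features["labels"].int2str(sample["labels"]))
print("pred:", clf(sample["image"])[0])
```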
[ "angular_leaf_spot", "bean_rust", "healthy" ]
surajjoshi/swin-tiny-patch4-window7-224-finetuned-brainTumorData
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-brainTumorData This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 4 ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1 - Datasets 2.6.1 - Tokenizers 0.13.1
[ "no", "yes" ]
Norod78/swin-muppet-faces
# Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 1816962673 - CO2 Emissions (in grams): 0.0115 ## Validation Metrics - Loss: 0.208 - Accuracy: 0.963 - Macro F1: 0.935 - Micro F1: 0.963 - Weighted F1: 0.962 - Macro Precision: 0.945 - Micro Precision: 0.963 - Weighted Precision: 0.965 - Macro Recall: 0.933 - Micro Recall: 0.963 - Weighted Recall: 0.963
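With 24 classes, top-k output is more informative than a single label; a hedged usage sketch (the repo id comes from the header, and `muppet_frame.jpg` is a hypothetical input image):

```python
# Hedged sketch: show the five most likely Muppet faces for one frame.
from transformers import pipeline

clf = pipeline("image-classification", model="Norod78/swin-muppet-faces")
for pred in clf("muppet_frame.jpg", top_k=5):
    print(f"{pred['label']}: {pred['score']:.3f}")
```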
[ "animal", "beaker", "fozzie", "gonzo", "grover", "kermit", "oscar", "pepe", "piggy", "rowlf", "scooter", "statler", "bert", "swedishchef", "thecount", "waldorf", "zoot", "bigbird", "bunsen", "camilla", "cookiemonster", "elmo", "ernie", "floyd" ]
jayanta/resnet-50-finetuned-memes-v2
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # resnet-50-finetuned-memes-v2 This model is a fine-tuned version of [microsoft/resnet-50](https://huggingface.co/microsoft/resnet-50) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 1.3295 - Accuracy: 0.4567 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.00012 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.4954 | 0.99 | 20 | 1.4559 | 0.4567 | | 1.407 | 1.99 | 40 | 1.3772 | 0.4567 | | 1.3744 | 2.99 | 60 | 1.3378 | 0.4567 | | 1.3427 | 3.99 | 80 | 1.3295 | 0.4567 | ### Framework versions - Transformers 4.24.0.dev0 - Pytorch 1.11.0+cu102 - Datasets 2.6.1.dev0 - Tokenizers 0.13.1
[ "bollywood memes", "industrialist", "political memes", "singer memes", "sports memes" ]
jayanta/vit-base-patch16-224-FV-20epochs-finetuned-memes
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-patch16-224-FV-20epochs-finetuned-memes This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.6532 - Accuracy: 0.8632 - Precision: 0.8617 - Recall: 0.8632 - F1: 0.8621 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.00012 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:| | 1.1709 | 0.99 | 20 | 0.9393 | 0.6971 | 0.6896 | 0.6971 | 0.6890 | | 0.5295 | 1.99 | 40 | 0.5024 | 0.8091 | 0.8210 | 0.8091 | 0.8133 | | 0.2909 | 2.99 | 60 | 0.4070 | 0.8539 | 0.8529 | 0.8539 | 0.8529 | | 0.1435 | 3.99 | 80 | 0.4136 | 0.8539 | 0.8522 | 0.8539 | 0.8522 | | 0.0928 | 4.99 | 100 | 0.4495 | 0.8478 | 0.8548 | 0.8478 | 0.8507 | | 0.0643 | 5.99 | 120 | 0.4897 | 0.8594 | 0.8572 | 0.8594 | 0.8573 | | 0.061 | 6.99 | 140 | 0.5040 | 0.8423 | 0.8490 | 0.8423 | 0.8453 | | 0.0519 | 7.99 | 160 | 0.5266 | 0.8524 | 0.8502 | 0.8524 | 0.8510 | | 0.0546 | 8.99 | 180 | 0.5200 | 0.8586 | 0.8632 | 0.8586 | 0.8605 | | 0.0478 | 9.99 | 200 | 0.5654 | 0.8555 | 0.8548 | 0.8555 | 0.8548 | | 0.0509 | 10.99 | 220 | 0.5774 | 0.8609 | 0.8626 | 0.8609 | 0.8616 | | 0.0467 | 11.99 | 240 | 0.5847 | 0.8594 | 0.8602 | 0.8594 | 0.8594 | | 0.0468 | 12.99 | 260 | 0.5909 | 0.8601 | 0.8597 | 0.8601 | 0.8596 | | 0.0469 | 13.99 | 280 | 0.5970 | 0.8563 | 0.8560 | 0.8563 | 0.8561 | | 0.0438 | 14.99 | 300 | 0.6234 | 0.8594 | 0.8583 | 0.8594 | 0.8586 | | 0.0441 | 15.99 | 320 | 0.6190 | 0.8563 | 0.8582 | 0.8563 | 0.8570 | | 0.0431 | 16.99 | 340 | 0.6419 | 0.8570 | 0.8584 | 0.8570 | 0.8574 | | 0.0454 | 17.99 | 360 | 0.6528 | 0.8563 | 0.8556 | 0.8563 | 0.8558 | | 0.0417 | 18.99 | 380 | 0.6688 | 0.8578 | 0.8575 | 0.8578 | 0.8574 | | 0.0432 | 19.99 | 400 | 0.6532 | 0.8632 | 0.8617 | 0.8632 | 0.8621 | ### Framework versions - Transformers 4.24.0.dev0 - Pytorch 1.11.0+cu102 - Datasets 2.6.1.dev0 - Tokenizers 0.13.1
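The per-epoch precision/recall/F1 columns above are consistent with a weighted-average `compute_metrics` callback; one plausible (unconfirmed) implementation is sketched below.

```python
# Hedged sketch: a compute_metrics callback that would produce the
# accuracy/precision/recall/F1 columns above. Weighted averaging is an
# assumption; the card does not state the averaging mode.
import numpy as np
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    precision, recall, f1, _ = precision_recall_fscore_support(
        labels, preds, average="weighted", zero_division=0
    )
    return {
        "accuracy": accuracy_score(labels, preds),
        "precision": precision,
        "recall": recall,
        "f1": f1,
    }
```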
[ "bollywood memes", "industrialist", "political memes", "singer memes", "sports memes" ]
jayanta/mit-b2-fv-finetuned-memes
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mit-b2-fv-finetuned-memes This model is a fine-tuned version of [nvidia/mit-b2](https://huggingface.co/nvidia/mit-b2) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.5984 - Accuracy: 0.8323 - Precision: 0.8312 - Recall: 0.8323 - F1: 0.8315 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.00012 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:| | 1.3683 | 0.99 | 20 | 1.1798 | 0.5703 | 0.4914 | 0.5703 | 0.4915 | | 1.0113 | 1.99 | 40 | 1.0384 | 0.6159 | 0.6813 | 0.6159 | 0.6274 | | 0.7581 | 2.99 | 60 | 0.8348 | 0.6808 | 0.7377 | 0.6808 | 0.6840 | | 0.6241 | 3.99 | 80 | 0.6034 | 0.7713 | 0.7864 | 0.7713 | 0.7735 | | 0.4999 | 4.99 | 100 | 0.5481 | 0.7944 | 0.8000 | 0.7944 | 0.7909 | | 0.3981 | 5.99 | 120 | 0.5253 | 0.8022 | 0.8091 | 0.8022 | 0.8000 | | 0.3484 | 6.99 | 140 | 0.4688 | 0.8238 | 0.8147 | 0.8238 | 0.8146 | | 0.3142 | 7.99 | 160 | 0.6245 | 0.7867 | 0.8209 | 0.7867 | 0.7920 | | 0.2339 | 8.99 | 180 | 0.5053 | 0.8362 | 0.8426 | 0.8362 | 0.8355 | | 0.2284 | 9.99 | 200 | 0.5070 | 0.8230 | 0.8220 | 0.8230 | 0.8187 | | 0.1824 | 10.99 | 220 | 0.5780 | 0.8006 | 0.8138 | 0.8006 | 0.8035 | | 0.1561 | 11.99 | 240 | 0.5429 | 0.8253 | 0.8197 | 0.8253 | 0.8218 | | 0.1229 | 12.99 | 260 | 0.5325 | 0.8331 | 0.8296 | 0.8331 | 0.8303 | | 0.1232 | 13.99 | 280 | 0.5595 | 0.8277 | 0.8290 | 0.8277 | 0.8273 | | 0.118 | 14.99 | 300 | 0.5974 | 0.8292 | 0.8345 | 0.8292 | 0.8299 | | 0.11 | 15.99 | 320 | 0.5796 | 0.8253 | 0.8228 | 0.8253 | 0.8231 | | 0.0948 | 16.99 | 340 | 0.5581 | 0.8346 | 0.8358 | 0.8346 | 0.8349 | | 0.0985 | 17.99 | 360 | 0.5700 | 0.8338 | 0.8301 | 0.8338 | 0.8318 | | 0.0821 | 18.99 | 380 | 0.5756 | 0.8331 | 0.8343 | 0.8331 | 0.8335 | | 0.0813 | 19.99 | 400 | 0.5984 | 0.8323 | 0.8312 | 0.8323 | 0.8315 | ### Framework versions - Transformers 4.24.0.dev0 - Pytorch 1.11.0+cu102 - Datasets 2.6.1.dev0 - Tokenizers 0.13.1
[ "bollywood memes", "industrialist", "political memes", "singer memes", "sports memes" ]
jayanta/resnet-152-fv-finetuned-memess
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # resnet-152-fv-finetuned-memess This model is a fine-tuned version of [microsoft/resnet-152](https://huggingface.co/microsoft/resnet-152) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.6281 - Accuracy: 0.7674 - Precision: 0.7651 - Recall: 0.7674 - F1: 0.7647 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.00012 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:| | 1.5902 | 0.99 | 20 | 1.5519 | 0.4938 | 0.3491 | 0.4938 | 0.3529 | | 1.4694 | 1.99 | 40 | 1.3730 | 0.4892 | 0.4095 | 0.4892 | 0.3222 | | 1.3129 | 2.99 | 60 | 1.2052 | 0.5301 | 0.3504 | 0.5301 | 0.4005 | | 1.1831 | 3.99 | 80 | 1.1142 | 0.5587 | 0.4077 | 0.5587 | 0.4444 | | 1.0581 | 4.99 | 100 | 0.9930 | 0.6012 | 0.5680 | 0.6012 | 0.5108 | | 0.9464 | 5.99 | 120 | 0.9263 | 0.6507 | 0.6200 | 0.6507 | 0.6029 | | 0.8581 | 6.99 | 140 | 0.8400 | 0.6917 | 0.6645 | 0.6917 | 0.6638 | | 0.7739 | 7.99 | 160 | 0.7829 | 0.7087 | 0.6918 | 0.7087 | 0.6845 | | 0.6762 | 8.99 | 180 | 0.7512 | 0.7318 | 0.7206 | 0.7318 | 0.7189 | | 0.6162 | 9.99 | 200 | 0.7409 | 0.7264 | 0.7244 | 0.7264 | 0.7241 | | 0.5546 | 10.99 | 220 | 0.6936 | 0.7465 | 0.7429 | 0.7465 | 0.7395 | | 0.4633 | 11.99 | 240 | 0.6779 | 0.7473 | 0.7393 | 0.7473 | 0.7412 | | 0.4373 | 12.99 | 260 | 0.6736 | 0.7573 | 0.7492 | 0.7573 | 0.7523 | | 0.4074 | 13.99 | 280 | 0.6534 | 0.7566 | 0.7516 | 0.7566 | 0.7528 | | 0.39 | 14.99 | 300 | 0.6521 | 0.7651 | 0.7603 | 0.7651 | 0.7608 | | 0.3766 | 15.99 | 320 | 0.6499 | 0.7682 | 0.7607 | 0.7682 | 0.7630 | | 0.3507 | 16.99 | 340 | 0.6497 | 0.7697 | 0.7686 | 0.7697 | 0.7686 | | 0.3589 | 17.99 | 360 | 0.6519 | 0.7535 | 0.7485 | 0.7535 | 0.7502 | | 0.3261 | 18.99 | 380 | 0.6449 | 0.7589 | 0.7597 | 0.7589 | 0.7585 | | 0.3234 | 19.99 | 400 | 0.6281 | 0.7674 | 0.7651 | 0.7674 | 0.7647 | ### Framework versions - Transformers 4.24.0.dev0 - Pytorch 1.11.0+cu102 - Datasets 2.6.1.dev0 - Tokenizers 0.13.1
[ "bollywood memes", "industrialist", "political memes", "singer memes", "sports memes" ]
jayanta/swin-large-patch4-window7-224-fv-finetuned-memes
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-large-patch4-window7-224-fv-finetuned-memes This model is a fine-tuned version of [microsoft/swin-large-patch4-window7-224](https://huggingface.co/microsoft/swin-large-patch4-window7-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.6502 - Accuracy: 0.8601 - Precision: 0.8582 - Recall: 0.8601 - F1: 0.8583 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.00012 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:| | 1.2077 | 0.99 | 20 | 0.9499 | 0.6461 | 0.6764 | 0.6461 | 0.5863 | | 0.5687 | 1.99 | 40 | 0.5365 | 0.7975 | 0.8018 | 0.7975 | 0.7924 | | 0.3607 | 2.99 | 60 | 0.4007 | 0.8423 | 0.8419 | 0.8423 | 0.8398 | | 0.203 | 3.99 | 80 | 0.3751 | 0.8509 | 0.8502 | 0.8509 | 0.8503 | | 0.1728 | 4.99 | 100 | 0.4168 | 0.8509 | 0.8519 | 0.8509 | 0.8506 | | 0.0963 | 5.99 | 120 | 0.4351 | 0.8586 | 0.8573 | 0.8586 | 0.8555 | | 0.0956 | 6.99 | 140 | 0.4415 | 0.8547 | 0.8542 | 0.8547 | 0.8541 | | 0.079 | 7.99 | 160 | 0.5312 | 0.8501 | 0.8475 | 0.8501 | 0.8459 | | 0.0635 | 8.99 | 180 | 0.5376 | 0.8601 | 0.8578 | 0.8601 | 0.8577 | | 0.0593 | 9.99 | 200 | 0.5060 | 0.8609 | 0.8615 | 0.8609 | 0.8604 | | 0.0656 | 10.99 | 220 | 0.4997 | 0.8617 | 0.8573 | 0.8617 | 0.8587 | | 0.0561 | 11.99 | 240 | 0.5430 | 0.8586 | 0.8604 | 0.8586 | 0.8589 | | 0.0523 | 12.99 | 260 | 0.5354 | 0.8624 | 0.8643 | 0.8624 | 0.8626 | | 0.0489 | 13.99 | 280 | 0.5539 | 0.8609 | 0.8572 | 0.8609 | 0.8577 | | 0.0487 | 14.99 | 300 | 0.5785 | 0.8609 | 0.8591 | 0.8609 | 0.8591 | | 0.0485 | 15.99 | 320 | 0.6186 | 0.8601 | 0.8578 | 0.8601 | 0.8573 | | 0.0518 | 16.99 | 340 | 0.6342 | 0.8624 | 0.8612 | 0.8624 | 0.8606 | | 0.0432 | 17.99 | 360 | 0.6302 | 0.8586 | 0.8598 | 0.8586 | 0.8580 | | 0.0469 | 18.99 | 380 | 0.6323 | 0.8617 | 0.8606 | 0.8617 | 0.8604 | | 0.0426 | 19.99 | 400 | 0.6502 | 0.8601 | 0.8582 | 0.8601 | 0.8583 | ### Framework versions - Transformers 4.24.0.dev0 - Pytorch 1.11.0+cu102 - Datasets 2.6.1.dev0 - Tokenizers 0.13.1
[ "bollywood memes", "industrialist", "political memes", "singer memes", "sports memes" ]
jayanta/cvt-13-384-22k-fv-finetuned-memes
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # cvt-13-384-22k-fv-finetuned-memes This model is a fine-tuned version of [microsoft/cvt-13-384-22k](https://huggingface.co/microsoft/cvt-13-384-22k) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.5761 - Accuracy: 0.8315 - Precision: 0.8302 - Recall: 0.8315 - F1: 0.8292 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.00012 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:| | 1.3821 | 0.99 | 20 | 1.2780 | 0.4969 | 0.5083 | 0.4969 | 0.4458 | | 1.0785 | 1.99 | 40 | 0.8633 | 0.6669 | 0.6658 | 0.6669 | 0.6500 | | 0.8862 | 2.99 | 60 | 0.7110 | 0.7218 | 0.7258 | 0.7218 | 0.7013 | | 0.665 | 3.99 | 80 | 0.5515 | 0.8045 | 0.8137 | 0.8045 | 0.8050 | | 0.6056 | 4.99 | 100 | 0.5956 | 0.7960 | 0.8041 | 0.7960 | 0.7846 | | 0.4779 | 5.99 | 120 | 0.6229 | 0.7937 | 0.7945 | 0.7937 | 0.7857 | | 0.4554 | 6.99 | 140 | 0.5355 | 0.8099 | 0.8126 | 0.8099 | 0.8086 | | 0.4249 | 7.99 | 160 | 0.5447 | 0.8269 | 0.8275 | 0.8269 | 0.8236 | | 0.4313 | 8.99 | 180 | 0.5530 | 0.8153 | 0.8140 | 0.8153 | 0.8132 | | 0.423 | 9.99 | 200 | 0.5346 | 0.8238 | 0.8230 | 0.8238 | 0.8223 | | 0.3997 | 10.99 | 220 | 0.5413 | 0.8338 | 0.8347 | 0.8338 | 0.8338 | | 0.4095 | 11.99 | 240 | 0.5999 | 0.8207 | 0.8231 | 0.8207 | 0.8177 | | 0.3979 | 12.99 | 260 | 0.5632 | 0.8284 | 0.8255 | 0.8284 | 0.8250 | | 0.3408 | 13.99 | 280 | 0.5725 | 0.8207 | 0.8198 | 0.8207 | 0.8196 | | 0.3828 | 14.99 | 300 | 0.5631 | 0.8277 | 0.8258 | 0.8277 | 0.8260 | | 0.3595 | 15.99 | 320 | 0.6005 | 0.8308 | 0.8297 | 0.8308 | 0.8275 | | 0.3789 | 16.99 | 340 | 0.5840 | 0.8300 | 0.8271 | 0.8300 | 0.8273 | | 0.3545 | 17.99 | 360 | 0.5983 | 0.8246 | 0.8226 | 0.8246 | 0.8222 | | 0.3472 | 18.99 | 380 | 0.5795 | 0.8416 | 0.8382 | 0.8416 | 0.8390 | | 0.355 | 19.99 | 400 | 0.5761 | 0.8315 | 0.8302 | 0.8315 | 0.8292 | ### Framework versions - Transformers 4.24.0.dev0 - Pytorch 1.11.0+cu102 - Datasets 2.6.1.dev0 - Tokenizers 0.13.1
[ "bollywood memes", "industrialist", "political memes", "singer memes", "sports memes" ]
jayanta/vit-base-patch16-224-FV2-finetuned-memes
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-patch16-224-FV2-finetuned-memes This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.5458 - Accuracy: 0.8648 - Precision: 0.8651 - Recall: 0.8648 - F1: 0.8646 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.00012 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:| | 0.994 | 0.99 | 20 | 0.7937 | 0.7257 | 0.7148 | 0.7257 | 0.7025 | | 0.509 | 1.99 | 40 | 0.4634 | 0.8346 | 0.8461 | 0.8346 | 0.8303 | | 0.2698 | 2.99 | 60 | 0.3851 | 0.8594 | 0.8619 | 0.8594 | 0.8586 | | 0.1381 | 3.99 | 80 | 0.4186 | 0.8624 | 0.8716 | 0.8624 | 0.8634 | | 0.0899 | 4.99 | 100 | 0.4038 | 0.8586 | 0.8624 | 0.8586 | 0.8594 | | 0.0708 | 5.99 | 120 | 0.4170 | 0.8563 | 0.8612 | 0.8563 | 0.8580 | | 0.0629 | 6.99 | 140 | 0.4414 | 0.8594 | 0.8599 | 0.8594 | 0.8585 | | 0.0554 | 7.99 | 160 | 0.4617 | 0.8539 | 0.8563 | 0.8539 | 0.8550 | | 0.0582 | 8.99 | 180 | 0.4712 | 0.8648 | 0.8667 | 0.8648 | 0.8651 | | 0.0582 | 9.99 | 200 | 0.4753 | 0.8632 | 0.8647 | 0.8632 | 0.8636 | | 0.0535 | 10.99 | 220 | 0.4653 | 0.8694 | 0.8690 | 0.8694 | 0.8684 | | 0.0516 | 11.99 | 240 | 0.4937 | 0.8679 | 0.8692 | 0.8679 | 0.8681 | | 0.0478 | 12.99 | 260 | 0.5109 | 0.8725 | 0.8741 | 0.8725 | 0.8718 | | 0.0484 | 13.99 | 280 | 0.5144 | 0.8640 | 0.8660 | 0.8640 | 0.8647 | | 0.0472 | 14.99 | 300 | 0.5249 | 0.8679 | 0.8688 | 0.8679 | 0.8678 | | 0.043 | 15.99 | 320 | 0.5324 | 0.8709 | 0.8711 | 0.8709 | 0.8704 | | 0.0473 | 16.99 | 340 | 0.5352 | 0.8648 | 0.8660 | 0.8648 | 0.8647 | | 0.0502 | 17.99 | 360 | 0.5389 | 0.8694 | 0.8692 | 0.8694 | 0.8687 | | 0.0489 | 18.99 | 380 | 0.5564 | 0.8648 | 0.8666 | 0.8648 | 0.8651 | | 0.04 | 19.99 | 400 | 0.5458 | 0.8648 | 0.8651 | 0.8648 | 0.8646 | ### Framework versions - Transformers 4.24.0.dev0 - Pytorch 1.11.0+cu102 - Datasets 2.6.1.dev0 - Tokenizers 0.13.1
[ "bollywood memes", "industrialist", "political memes", "singer memes", "sports memes" ]
jayanta/swin-base-patch4-window7-224-in22k-finetuned-memes
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-base-patch4-window7-224-in22k-finetuned-memes This model is a fine-tuned version of [microsoft/swin-base-patch4-window7-224-in22k](https://huggingface.co/microsoft/swin-base-patch4-window7-224-in22k) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.7094 - Accuracy: 0.8563 - Precision: 0.8546 - Recall: 0.8563 - F1: 0.8552 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.00012 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:| | 1.1655 | 0.99 | 20 | 0.8573 | 0.6955 | 0.6953 | 0.6955 | 0.6683 | | 0.5506 | 1.99 | 40 | 0.5327 | 0.8083 | 0.8050 | 0.8083 | 0.7963 | | 0.3573 | 2.99 | 60 | 0.4497 | 0.8338 | 0.8339 | 0.8338 | 0.8317 | | 0.2083 | 3.99 | 80 | 0.4561 | 0.8354 | 0.8450 | 0.8354 | 0.8368 | | 0.1545 | 4.99 | 100 | 0.4605 | 0.8423 | 0.8458 | 0.8423 | 0.8430 | | 0.1014 | 5.99 | 120 | 0.4924 | 0.8524 | 0.8571 | 0.8524 | 0.8538 | | 0.0854 | 6.99 | 140 | 0.5759 | 0.8393 | 0.8452 | 0.8393 | 0.8400 | | 0.1012 | 7.99 | 160 | 0.5142 | 0.8362 | 0.8378 | 0.8362 | 0.8361 | | 0.077 | 8.99 | 180 | 0.5647 | 0.8331 | 0.8538 | 0.8331 | 0.8407 | | 0.0667 | 9.99 | 200 | 0.5294 | 0.8462 | 0.8509 | 0.8462 | 0.8483 | | 0.0666 | 10.99 | 220 | 0.6038 | 0.8385 | 0.8415 | 0.8385 | 0.8396 | | 0.0574 | 11.99 | 240 | 0.6384 | 0.8408 | 0.8431 | 0.8408 | 0.8411 | | 0.0488 | 12.99 | 260 | 0.6305 | 0.8516 | 0.8561 | 0.8516 | 0.8532 | | 0.0524 | 13.99 | 280 | 0.6411 | 0.8509 | 0.8526 | 0.8509 | 0.8510 | | 0.0511 | 14.99 | 300 | 0.6462 | 0.8547 | 0.8542 | 0.8547 | 0.8543 | | 0.0495 | 15.99 | 320 | 0.6869 | 0.8532 | 0.8534 | 0.8532 | 0.8527 | | 0.0412 | 16.99 | 340 | 0.6643 | 0.8578 | 0.8554 | 0.8578 | 0.8564 | | 0.0411 | 17.99 | 360 | 0.7214 | 0.8570 | 0.8539 | 0.8570 | 0.8552 | | 0.0434 | 18.99 | 380 | 0.7037 | 0.8524 | 0.8507 | 0.8524 | 0.8514 | | 0.0394 | 19.99 | 400 | 0.7094 | 0.8563 | 0.8546 | 0.8563 | 0.8552 | ### Framework versions - Transformers 4.24.0.dev0 - Pytorch 1.11.0+cu102 - Datasets 2.6.1.dev0 - Tokenizers 0.13.1
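For reproduction, the hyperparameter list above maps onto Transformers `TrainingArguments` roughly as follows (a hedged sketch: `output_dir` is hypothetical, the Adam betas/epsilon are the library defaults the card reports, and the 256 total batch size assumes a single device).

```python
# Hedged sketch reconstructing the hyperparameters listed above.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="swin-base-patch4-window7-224-in22k-finetuned-memes",  # hypothetical
    learning_rate=1.2e-4,
    per_device_train_batch_size=64,
    per_device_eval_batch_size=64,
    gradient_accumulation_steps=4,  # effective batch: 64 * 4 = 256 on one GPU
    num_train_epochs=20,
    lr_scheduler_type="linear",
    warmup_ratio=0.1,
    seed=42,
)
```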
[ "bollywood memes", "industrialist", "political memes", "singer memes", "sports memes" ]
jayanta/convnext-large-224-22k-1k-FV2-finetuned-memes
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # convnext-large-224-22k-1k-FV2-finetuned-memes This model is a fine-tuned version of [facebook/convnext-large-224-22k-1k](https://huggingface.co/facebook/convnext-large-224-22k-1k) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.4290 - Accuracy: 0.8663 - Precision: 0.8617 - Recall: 0.8663 - F1: 0.8629 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.00012 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:| | 0.8992 | 0.99 | 20 | 0.6455 | 0.7658 | 0.7512 | 0.7658 | 0.7534 | | 0.4245 | 1.99 | 40 | 0.4008 | 0.8539 | 0.8680 | 0.8539 | 0.8541 | | 0.2054 | 2.99 | 60 | 0.3245 | 0.8694 | 0.8631 | 0.8694 | 0.8650 | | 0.1102 | 3.99 | 80 | 0.3231 | 0.8671 | 0.8624 | 0.8671 | 0.8645 | | 0.0765 | 4.99 | 100 | 0.3882 | 0.8563 | 0.8603 | 0.8563 | 0.8556 | | 0.0642 | 5.99 | 120 | 0.4133 | 0.8601 | 0.8604 | 0.8601 | 0.8598 | | 0.0574 | 6.99 | 140 | 0.3889 | 0.8694 | 0.8657 | 0.8694 | 0.8667 | | 0.0526 | 7.99 | 160 | 0.4145 | 0.8655 | 0.8705 | 0.8655 | 0.8670 | | 0.0468 | 8.99 | 180 | 0.4256 | 0.8679 | 0.8642 | 0.8679 | 0.8650 | | 0.0472 | 9.99 | 200 | 0.4290 | 0.8663 | 0.8617 | 0.8663 | 0.8629 | ### Framework versions - Transformers 4.24.0.dev0 - Pytorch 1.11.0+cu102 - Datasets 2.6.1.dev0 - Tokenizers 0.13.1
[ "bollywood memes", "industrialist", "political memes", "singer memes", "sports memes" ]
jayanta/resnet152-FV-finetuned-memes
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # resnet152-FV-finetuned-memes This model is a fine-tuned version of [microsoft/resnet-152](https://huggingface.co/microsoft/resnet-152) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.6772 - Accuracy: 0.7558 - Precision: 0.7557 - Recall: 0.7558 - F1: 0.7546 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.00012 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:| | 1.5739 | 0.99 | 20 | 1.5427 | 0.4521 | 0.3131 | 0.4521 | 0.2880 | | 1.4353 | 1.99 | 40 | 1.3786 | 0.4490 | 0.3850 | 0.4490 | 0.2791 | | 1.3026 | 2.99 | 60 | 1.2734 | 0.4799 | 0.3073 | 0.4799 | 0.3393 | | 1.1579 | 3.99 | 80 | 1.1378 | 0.5278 | 0.4300 | 0.5278 | 0.4143 | | 1.0276 | 4.99 | 100 | 1.0231 | 0.5734 | 0.4497 | 0.5734 | 0.4865 | | 0.8826 | 5.99 | 120 | 0.9228 | 0.6252 | 0.5983 | 0.6252 | 0.5637 | | 0.766 | 6.99 | 140 | 0.8441 | 0.6662 | 0.6474 | 0.6662 | 0.6320 | | 0.6732 | 7.99 | 160 | 0.8009 | 0.6901 | 0.6759 | 0.6901 | 0.6704 | | 0.5653 | 8.99 | 180 | 0.7535 | 0.7218 | 0.7141 | 0.7218 | 0.7129 | | 0.4957 | 9.99 | 200 | 0.7317 | 0.7257 | 0.7248 | 0.7257 | 0.7200 | | 0.4534 | 10.99 | 220 | 0.6808 | 0.7434 | 0.7405 | 0.7434 | 0.7390 | | 0.3792 | 11.99 | 240 | 0.6949 | 0.7450 | 0.7454 | 0.7450 | 0.7399 | | 0.3489 | 12.99 | 260 | 0.6746 | 0.7496 | 0.7511 | 0.7496 | 0.7474 | | 0.3113 | 13.99 | 280 | 0.6637 | 0.7573 | 0.7638 | 0.7573 | 0.7579 | | 0.2947 | 14.99 | 300 | 0.6451 | 0.7589 | 0.7667 | 0.7589 | 0.7610 | | 0.2776 | 15.99 | 320 | 0.6754 | 0.7543 | 0.7565 | 0.7543 | 0.7525 | | 0.2611 | 16.99 | 340 | 0.6808 | 0.7550 | 0.7607 | 0.7550 | 0.7529 | | 0.2428 | 17.99 | 360 | 0.7005 | 0.7457 | 0.7497 | 0.7457 | 0.7404 | | 0.2346 | 18.99 | 380 | 0.6597 | 0.7573 | 0.7642 | 0.7573 | 0.7590 | | 0.2367 | 19.99 | 400 | 0.6772 | 0.7558 | 0.7557 | 0.7558 | 0.7546 | ### Framework versions - Transformers 4.24.0.dev0 - Pytorch 1.11.0+cu102 - Datasets 2.6.1.dev0 - Tokenizers 0.13.1
[ "bollywood memes", "industrialist", "political memes", "singer memes", "sports memes" ]
jayanta/cvt-13-384-in22k-FV-finetuned-memes
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # cvt-13-384-in22k-FV-finetuned-memes This model is a fine-tuned version of [microsoft/cvt-13-384-22k](https://huggingface.co/microsoft/cvt-13-384-22k) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.5595 - Accuracy: 0.8346 - Precision: 0.8327 - Recall: 0.8346 - F1: 0.8322 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.00012 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:| | 1.4066 | 0.99 | 20 | 1.2430 | 0.5124 | 0.5141 | 0.5124 | 0.4371 | | 1.0813 | 1.99 | 40 | 0.8244 | 0.6893 | 0.6834 | 0.6893 | 0.6616 | | 0.8392 | 2.99 | 60 | 0.6334 | 0.7612 | 0.7670 | 0.7612 | 0.7570 | | 0.7065 | 3.99 | 80 | 0.5819 | 0.7767 | 0.7799 | 0.7767 | 0.7672 | | 0.5751 | 4.99 | 100 | 0.5365 | 0.8176 | 0.8216 | 0.8176 | 0.8130 | | 0.4896 | 5.99 | 120 | 0.4943 | 0.8308 | 0.8257 | 0.8308 | 0.8265 | | 0.4487 | 6.99 | 140 | 0.5399 | 0.8107 | 0.8069 | 0.8107 | 0.8054 | | 0.4349 | 7.99 | 160 | 0.4892 | 0.8300 | 0.8285 | 0.8300 | 0.8273 | | 0.43 | 8.99 | 180 | 0.4984 | 0.8454 | 0.8465 | 0.8454 | 0.8426 | | 0.4372 | 9.99 | 200 | 0.5573 | 0.8192 | 0.8221 | 0.8192 | 0.8157 | | 0.3994 | 10.99 | 220 | 0.5158 | 0.8300 | 0.8284 | 0.8300 | 0.8281 | | 0.3883 | 11.99 | 240 | 0.5495 | 0.8354 | 0.8317 | 0.8354 | 0.8314 | | 0.406 | 12.99 | 260 | 0.5298 | 0.8284 | 0.8285 | 0.8284 | 0.8246 | | 0.3355 | 13.99 | 280 | 0.5401 | 0.8393 | 0.8346 | 0.8393 | 0.8357 | | 0.395 | 14.99 | 300 | 0.5915 | 0.8308 | 0.8278 | 0.8308 | 0.8261 | | 0.3612 | 15.99 | 320 | 0.5852 | 0.8408 | 0.8378 | 0.8408 | 0.8368 | | 0.3765 | 16.99 | 340 | 0.5509 | 0.8385 | 0.8351 | 0.8385 | 0.8356 | | 0.3688 | 17.99 | 360 | 0.5668 | 0.8416 | 0.8398 | 0.8416 | 0.8387 | | 0.3503 | 18.99 | 380 | 0.5626 | 0.8393 | 0.8371 | 0.8393 | 0.8365 | | 0.3611 | 19.99 | 400 | 0.5595 | 0.8346 | 0.8327 | 0.8346 | 0.8322 | ### Framework versions - Transformers 4.24.0.dev0 - Pytorch 1.11.0+cu102 - Datasets 2.6.1.dev0 - Tokenizers 0.13.1
[ "bollywood memes", "industrialist", "political memes", "singer memes", "sports memes" ]
jayanta/mit-b2-VF2-finetuned-memes
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mit-b2-VF2-finetuned-memes This model is a fine-tuned version of [nvidia/mit-b2](https://huggingface.co/nvidia/mit-b2) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.6547 - Accuracy: 0.8308 - Precision: 0.8272 - Recall: 0.8308 - F1: 0.8287 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.00012 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:| | 1.3077 | 0.99 | 20 | 1.1683 | 0.5549 | 0.5621 | 0.5549 | 0.5286 | | 0.9359 | 1.99 | 40 | 0.8573 | 0.6731 | 0.6807 | 0.6731 | 0.6535 | | 0.7219 | 2.99 | 60 | 0.7106 | 0.7272 | 0.7359 | 0.7272 | 0.7246 | | 0.6013 | 3.99 | 80 | 0.6445 | 0.7550 | 0.7686 | 0.7550 | 0.7558 | | 0.5243 | 4.99 | 100 | 0.6717 | 0.7573 | 0.8077 | 0.7573 | 0.7584 | | 0.4409 | 5.99 | 120 | 0.5315 | 0.8068 | 0.8027 | 0.8068 | 0.7989 | | 0.3325 | 6.99 | 140 | 0.5159 | 0.8230 | 0.8236 | 0.8230 | 0.8158 | | 0.2719 | 7.99 | 160 | 0.5250 | 0.8215 | 0.8227 | 0.8215 | 0.8202 | | 0.242 | 8.99 | 180 | 0.5087 | 0.8277 | 0.8260 | 0.8277 | 0.8268 | | 0.2247 | 9.99 | 200 | 0.5313 | 0.8215 | 0.8275 | 0.8215 | 0.8218 | | 0.1955 | 10.99 | 220 | 0.6167 | 0.8130 | 0.8062 | 0.8130 | 0.8073 | | 0.1567 | 11.99 | 240 | 0.5859 | 0.8168 | 0.8185 | 0.8168 | 0.8173 | | 0.1479 | 12.99 | 260 | 0.5938 | 0.8215 | 0.8169 | 0.8215 | 0.8178 | | 0.1241 | 13.99 | 280 | 0.6187 | 0.8261 | 0.8234 | 0.8261 | 0.8239 | | 0.1114 | 14.99 | 300 | 0.6419 | 0.8261 | 0.8351 | 0.8261 | 0.8293 | | 0.1022 | 15.99 | 320 | 0.6322 | 0.8323 | 0.8284 | 0.8323 | 0.8294 | | 0.0941 | 16.99 | 340 | 0.6595 | 0.8269 | 0.8266 | 0.8269 | 0.8263 | | 0.0935 | 17.99 | 360 | 0.6674 | 0.8269 | 0.8218 | 0.8269 | 0.8237 | | 0.089 | 18.99 | 380 | 0.6533 | 0.8253 | 0.8222 | 0.8253 | 0.8235 | | 0.0794 | 19.99 | 400 | 0.6547 | 0.8308 | 0.8272 | 0.8308 | 0.8287 | ### Framework versions - Transformers 4.24.0.dev0 - Pytorch 1.11.0+cu102 - Datasets 2.6.1.dev0 - Tokenizers 0.13.1
[ "bollywood memes", "industrialist", "political memes", "singer memes", "sports memes" ]
jayanta/resnet-50-FV2-finetuned-memes
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # resnet-50-FV2-finetuned-memes This model is a fine-tuned version of [microsoft/resnet-50](https://huggingface.co/microsoft/resnet-50) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.9263 - Accuracy: 0.6453 - Precision: 0.5728 - Recall: 0.6453 - F1: 0.5964 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.00012 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:| | 1.5763 | 0.99 | 20 | 1.5575 | 0.4281 | 0.2966 | 0.4281 | 0.2669 | | 1.4761 | 1.99 | 40 | 1.4424 | 0.4343 | 0.1886 | 0.4343 | 0.2630 | | 1.3563 | 2.99 | 60 | 1.3240 | 0.4343 | 0.1886 | 0.4343 | 0.2630 | | 1.2824 | 3.99 | 80 | 1.2636 | 0.4389 | 0.3097 | 0.4389 | 0.2734 | | 1.2315 | 4.99 | 100 | 1.2119 | 0.4529 | 0.3236 | 0.4529 | 0.3042 | | 1.1956 | 5.99 | 120 | 1.1764 | 0.4900 | 0.3731 | 0.4900 | 0.3692 | | 1.1452 | 6.99 | 140 | 1.1424 | 0.5147 | 0.3963 | 0.5147 | 0.4090 | | 1.1076 | 7.99 | 160 | 1.1190 | 0.5371 | 0.4121 | 0.5371 | 0.4392 | | 1.0679 | 8.99 | 180 | 1.0825 | 0.5719 | 0.4465 | 0.5719 | 0.4831 | | 1.0432 | 9.99 | 200 | 1.0482 | 0.5750 | 0.5404 | 0.5750 | 0.4930 | | 0.9903 | 10.99 | 220 | 1.0275 | 0.5958 | 0.5459 | 0.5958 | 0.5241 | | 0.9675 | 11.99 | 240 | 1.0145 | 0.6051 | 0.5350 | 0.6051 | 0.5379 | | 0.9335 | 12.99 | 260 | 0.9860 | 0.6175 | 0.5537 | 0.6175 | 0.5527 | | 0.9157 | 13.99 | 280 | 0.9683 | 0.6105 | 0.5386 | 0.6105 | 0.5504 | | 0.8901 | 14.99 | 300 | 0.9558 | 0.6352 | 0.5686 | 0.6352 | 0.5833 | | 0.8722 | 15.99 | 320 | 0.9382 | 0.6345 | 0.5657 | 0.6345 | 0.5807 | | 0.854 | 16.99 | 340 | 0.9322 | 0.6376 | 0.5623 | 0.6376 | 0.5856 | | 0.8494 | 17.99 | 360 | 0.9287 | 0.6422 | 0.6675 | 0.6422 | 0.5918 | | 0.8652 | 18.99 | 380 | 0.9212 | 0.6399 | 0.5640 | 0.6399 | 0.5863 | | 0.846 | 19.99 | 400 | 0.9263 | 0.6453 | 0.5728 | 0.6453 | 0.5964 | ### Framework versions - Transformers 4.24.0.dev0 - Pytorch 1.11.0+cu102 - Datasets 2.6.1.dev0 - Tokenizers 0.13.1
[ "bollywood memes", "industrialist", "political memes", "singer memes", "sports memes" ]
jayanta/deit-base-patch16-224-FV-finetuned-memes
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # deit-base-patch16-224-FV-finetuned-memes This model is a fine-tuned version of [facebook/deit-base-patch16-224](https://huggingface.co/facebook/deit-base-patch16-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.6769 - Accuracy: 0.8485 - Precision: 0.8458 - Recall: 0.8485 - F1: 0.8464 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.00012 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:| | 1.2733 | 0.99 | 20 | 1.0893 | 0.5811 | 0.5790 | 0.5811 | 0.5293 | | 0.7284 | 1.99 | 40 | 0.7351 | 0.7210 | 0.7642 | 0.7210 | 0.7271 | | 0.4267 | 2.99 | 60 | 0.5202 | 0.7991 | 0.8104 | 0.7991 | 0.8033 | | 0.2181 | 3.99 | 80 | 0.4605 | 0.8346 | 0.8351 | 0.8346 | 0.8334 | | 0.1504 | 4.99 | 100 | 0.5281 | 0.8253 | 0.8281 | 0.8253 | 0.8266 | | 0.1001 | 5.99 | 120 | 0.4945 | 0.8369 | 0.8336 | 0.8369 | 0.8347 | | 0.0874 | 6.99 | 140 | 0.5902 | 0.8338 | 0.8370 | 0.8338 | 0.8348 | | 0.0634 | 7.99 | 160 | 0.6088 | 0.8253 | 0.8221 | 0.8253 | 0.8234 | | 0.0699 | 8.99 | 180 | 0.6210 | 0.8207 | 0.8202 | 0.8207 | 0.8186 | | 0.0661 | 9.99 | 200 | 0.5675 | 0.8385 | 0.8417 | 0.8385 | 0.8393 | | 0.0592 | 10.99 | 220 | 0.6550 | 0.8253 | 0.8324 | 0.8253 | 0.8275 | | 0.0559 | 11.99 | 240 | 0.6400 | 0.8416 | 0.8370 | 0.8416 | 0.8387 | | 0.0501 | 12.99 | 260 | 0.6726 | 0.8393 | 0.8353 | 0.8393 | 0.8350 | | 0.0529 | 13.99 | 280 | 0.6285 | 0.8408 | 0.8399 | 0.8408 | 0.8401 | | 0.0478 | 14.99 | 300 | 0.6423 | 0.8400 | 0.8380 | 0.8400 | 0.8384 | | 0.0458 | 15.99 | 320 | 0.6632 | 0.8369 | 0.8337 | 0.8369 | 0.8348 | | 0.048 | 16.99 | 340 | 0.6719 | 0.8423 | 0.8401 | 0.8423 | 0.8404 | | 0.0417 | 17.99 | 360 | 0.6807 | 0.8423 | 0.8415 | 0.8423 | 0.8408 | | 0.0461 | 18.99 | 380 | 0.6732 | 0.8454 | 0.8440 | 0.8454 | 0.8438 | | 0.044 | 19.99 | 400 | 0.6769 | 0.8485 | 0.8458 | 0.8485 | 0.8464 | ### Framework versions - Transformers 4.24.0.dev0 - Pytorch 1.11.0+cu102 - Datasets 2.6.1.dev0 - Tokenizers 0.13.1
[ "bollywood memes", "industrialist", "political memes", "singer memes", "sports memes" ]
jungjongho/vit-base-DogSick
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-DogSick This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.3041 - Acc: {'accuracy': 0.6102564102564103} - F1: {'f1': 0.5980148081337936} ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 4e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Acc | F1 | |:-------------:|:-----:|:----:|:---------------:|:---------------------------------:|:--------------------------:| | 2.4055 | 0.61 | 50 | 2.2086 | {'accuracy': 0.41794871794871796} | {'f1': 0.3246788612052483} | | 2.0379 | 1.22 | 100 | 1.9233 | {'accuracy': 0.4846153846153846} | {'f1': 0.4386383497855148} | | 1.7287 | 1.83 | 150 | 1.7200 | {'accuracy': 0.5256410256410257} | {'f1': 0.4806042289317683} | | 1.4667 | 2.44 | 200 | 1.6021 | {'accuracy': 0.5692307692307692} | {'f1': 0.533374137436958} | | 1.3444 | 3.05 | 250 | 1.5410 | {'accuracy': 0.5333333333333333} | {'f1': 0.4846134797922835} | | 1.1334 | 3.66 | 300 | 1.4674 | {'accuracy': 0.5743589743589743} | {'f1': 0.5533432367508125} | | 1.007 | 4.27 | 350 | 1.4096 | {'accuracy': 0.5923076923076923} | {'f1': 0.5801847507206119} | | 0.897 | 4.88 | 400 | 1.3674 | {'accuracy': 0.6} | {'f1': 0.5903283954748092} | | 0.7326 | 5.49 | 450 | 1.3359 | {'accuracy': 0.5923076923076923} | {'f1': 0.5793036546532927} | | 0.7105 | 6.1 | 500 | 1.3259 | {'accuracy': 0.6153846153846154} | {'f1': 0.6064330281486513} | | 0.6164 | 6.71 | 550 | 1.3183 | {'accuracy': 0.6102564102564103} | {'f1': 0.6014695572651212} | | 0.5804 | 7.32 | 600 | 1.3103 | {'accuracy': 0.6025641025641025} | {'f1': 0.5965366941171513} | | 0.5313 | 7.93 | 650 | 1.3041 | {'accuracy': 0.6102564102564103} | {'f1': 0.5980148081337936} | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
[ "결막염", "궤양성각막질환", "유루증", "정상", "핵경화", "백내장 초기", "비궤양성각막질환", "비성숙", "색소침착성각막염", "성숙", "안검내반증", "안검염", "안검종양" ]
SergioVillanueva/autotrain-person-intruder-classification-1840363138
# Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 1840363138 - CO2 Emissions (in grams): 0.5268 ## Validation Metrics - Loss: 0.464 - Accuracy: 0.818 - Precision: 0.778 - Recall: 1.000 - AUC: 1.000 - F1: 0.875
[ "intruder", "worker" ]
Owos/tb-classifier
# Tuberculosis Classifier [Github repo is here](https://github.com/owos/tb_project) </br> [HuggingFace Space](https://huggingface.co/spaces/Owos/tb_prediction_space) # Model description This is a computer vision model built with TensorFlow to classify whether a given x-ray scan is positive for tuberculosis. # Intended uses & limitations The model was built to support low-resourced and short-staffed primary healthcare centers in Nigeria. In particular, the aim was to create a computer-aided diagnosis tool for radiologists in these centers. The model has not undergone clinical testing, and usage is at the user's own risk. The model has, however, been tested on real-life images that are positive for tuberculosis. # How to use Download the pre-trained model and use it to make inferences. A space has been created for testing [here](space.com) # Training data The entire dataset consists of 3500 negative images and 700 positive TB images. </br> The data was split into 80% for training and 20% for validation. # Training procedure Transfer learning was employed with InceptionV3 as the pre-trained model. Training ran for 20 epochs, and the classes were weighted during training to counteract the class imbalance in the dataset. Training was done on Kaggle using the GPUs provided. More details of the experiments can be found [here](https://www.kaggle.com/code/abrahamowodunni/tb-project) # Evaluation results The results of the evaluation are as follows: - loss: 0.0923 - binary_accuracy: 0.9857 - precision: 0.9259 - recall: 0.9843 More information can be found in the plot linked below. [Evaluation results of the TB model](https://github.com/owos/tb_project/blob/main/README.md)
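To make the class weighting above concrete: with 3500 negative and 700 positive images, the usual "balanced" scheme weights each class inversely to its frequency. The card does not say which formula was used, so the sketch below is an assumption, not the author's code:

```python
# Balanced class weights: total / (n_classes * count_per_class).
counts = {0: 3500, 1: 700}  # 0 = negative, 1 = positive (label order assumed)
total = sum(counts.values())
class_weight = {label: total / (len(counts) * n) for label, n in counts.items()}
print(class_weight)  # {0: 0.6, 1: 3.0} -- each positive counts 5x more in the loss

# With a compiled Keras model, the weights would be passed straight to fit():
# model.fit(train_ds, validation_data=val_ds, epochs=20, class_weight=class_weight)
```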
[ "negative", "positive" ]
kem000123/autotrain-cat_vs_dogs-1858163503
# Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 1858163503 - CO2 Emissions (in grams): 0.7951 ## Validation Metrics - Loss: 0.007 - Accuracy: 1.000 - Precision: 1.000 - Recall: 1.000 - AUC: 1.000 - F1: 1.000
[ "cat", "dog" ]
hagerty7/recyclable-materials-classification
ViT for Recyclable Material Classification
[ "cardboard", "glass", "metal", "paper", "plastic", "trash" ]
YoussefSaad/out
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # out This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.1475 - Accuracy: 0.9587 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 64 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1402 | 1.39 | 100 | 0.2101 | 0.9371 | | 0.0538 | 2.78 | 200 | 0.1529 | 0.9548 | | 0.0164 | 4.17 | 300 | 0.1475 | 0.9587 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
[ "blouse", "crop top", "sweatshirt", "t shirt" ]
valadhi/swin-tiny-patch4-window7-224-large-dataset-varicropped
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-large-dataset-varicropped This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 1.3554 - Accuracy: 0.6571 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 30 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.8523 | 0.99 | 88 | 1.8136 | 0.3771 | | 0.0725 | 1.99 | 176 | 1.2359 | 0.6006 | | 0.0397 | 2.99 | 264 | 1.1716 | 0.6014 | | 0.0179 | 3.99 | 352 | 1.5688 | 0.5704 | | 0.0173 | 4.99 | 440 | 1.3718 | 0.6237 | | 0.0097 | 5.99 | 528 | 1.3841 | 0.5927 | | 0.0109 | 6.99 | 616 | 1.4044 | 0.5895 | | 0.0019 | 7.99 | 704 | 1.2936 | 0.6150 | | 0.002 | 8.99 | 792 | 1.4264 | 0.5760 | | 0.0035 | 9.99 | 880 | 1.2226 | 0.6396 | | 0.0025 | 10.99 | 968 | 1.1553 | 0.6635 | | 0.0009 | 11.99 | 1056 | 1.1727 | 0.6643 | | 0.0037 | 12.99 | 1144 | 1.1182 | 0.6714 | | 0.0017 | 13.99 | 1232 | 1.4015 | 0.6364 | | 0.0009 | 14.99 | 1320 | 1.2955 | 0.6683 | | 0.0002 | 15.99 | 1408 | 1.2310 | 0.6555 | | 0.0007 | 16.99 | 1496 | 1.3849 | 0.6325 | | 0.001 | 17.99 | 1584 | 1.4312 | 0.6102 | | 0.0001 | 18.99 | 1672 | 1.5087 | 0.6181 | | 0.0002 | 19.99 | 1760 | 1.7247 | 0.6062 | | 0.0016 | 20.99 | 1848 | 1.5534 | 0.6237 | | 0.0004 | 21.99 | 1936 | 1.5382 | 0.6333 | | 0.0008 | 22.99 | 2024 | 1.4910 | 0.6484 | | 0.0008 | 23.99 | 2112 | 1.5020 | 0.6380 | | 0.0005 | 24.99 | 2200 | 1.4788 | 0.6468 | | 0.001 | 25.99 | 2288 | 1.3416 | 0.6770 | | 0.003 | 26.99 | 2376 | 1.2643 | 0.6738 | | 0.0001 | 27.99 | 2464 | 1.3582 | 0.6595 | | 0.0 | 28.99 | 2552 | 1.3767 | 0.6523 | | 0.0 | 29.99 | 2640 | 1.3554 | 0.6571 | ### Framework versions - Transformers 4.21.1 - Pytorch 1.12.1 - Datasets 2.4.0 - Tokenizers 0.12.1
[ "ardei", "castravete", "ceapa", "dovlecel", "gulie", "loboda", "mărar", "ridiche", "salata", "spanac", "tomate", "usturoi", "varza", "vinete", "țelină" ]
YoussefSaad/dresses
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # dresses This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.4588 - Accuracy: 0.9014 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 64 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 15 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.2458 | 1.23 | 100 | 0.4519 | 0.8633 | | 0.0937 | 2.47 | 200 | 0.4285 | 0.8754 | | 0.0802 | 3.7 | 300 | 0.4683 | 0.8754 | | 0.041 | 4.94 | 400 | 0.4088 | 0.9031 | | 0.0277 | 6.17 | 500 | 0.3979 | 0.8945 | | 0.0459 | 7.41 | 600 | 0.4253 | 0.9014 | | 0.024 | 8.64 | 700 | 0.4680 | 0.8893 | | 0.0267 | 9.88 | 800 | 0.4575 | 0.8945 | | 0.019 | 11.11 | 900 | 0.4470 | 0.8893 | | 0.0235 | 12.35 | 1000 | 0.4380 | 0.9066 | | 0.0129 | 13.58 | 1100 | 0.4557 | 0.9048 | | 0.0211 | 14.81 | 1200 | 0.4588 | 0.9014 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
[ "a line dress", "blazer dress", "bodycon dress", "shirt dress", "slip dress", "t shirt dress" ]
juliensimon/swin-food102
# swin-food102 This model is a fine-tuned version of [juliensimon/autotrain-food101-1471154053](https://huggingface.co/juliensimon/autotrain-food101-1471154053) on the [food102](https://huggingface.co/datasets/juliensimon/food102) dataset, namely the [food101](https://huggingface.co/datasets/food101) dataset with an extra class generated with a Stable Diffusion model. A detailed walk-through is available on [YouTube](https://youtu.be/sIe0eo3fYQ4). It achieves the following results on the evaluation set: - Loss: 0.2510 - Accuracy: 0.9338 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 64 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.1648 | 1.0 | 597 | 0.3118 | 0.9218 | | 0.31 | 2.0 | 1194 | 0.2606 | 0.9322 | | 0.2488 | 3.0 | 1791 | 0.2510 | 0.9338 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu102 - Datasets 2.4.0 - Tokenizers 0.13.1
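For readers who want to reproduce a similar run, the hyperparameters above map onto transformers `TrainingArguments` roughly as sketched below; this is an illustration of the listed settings, not the author's actual script (dataset loading, model, and metrics are omitted):

```python
from transformers import TrainingArguments

# Mirrors the card: effective batch size 16 x 8 accumulation steps = 128,
# linear LR decay, and fp16 corresponding to "Native AMP".
args = TrainingArguments(
    output_dir="swin-food102",
    learning_rate=1e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=64,
    gradient_accumulation_steps=8,
    num_train_epochs=3,
    lr_scheduler_type="linear",
    seed=42,
    fp16=True,
)
```

Judging from the label list that follows, the extra Stable-Diffusion-generated class appears to be `boeuf_bourguignon`, which is not part of the original food101 label set.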
[ "apple_pie", "baby_back_ribs", "breakfast_burrito", "tuna_tartare", "waffles", "bruschetta", "caesar_salad", "cannoli", "caprese_salad", "carrot_cake", "ceviche", "cheese_plate", "cheesecake", "chicken_curry", "baklava", "chicken_quesadilla", "chicken_wings", "chocolate_cake", "chocolate_mousse", "churros", "clam_chowder", "club_sandwich", "crab_cakes", "creme_brulee", "croque_madame", "beef_carpaccio", "cup_cakes", "deviled_eggs", "donuts", "dumplings", "edamame", "eggs_benedict", "escargots", "falafel", "filet_mignon", "fish_and_chips", "beef_tartare", "foie_gras", "french_fries", "french_onion_soup", "french_toast", "fried_calamari", "fried_rice", "frozen_yogurt", "garlic_bread", "gnocchi", "greek_salad", "beet_salad", "grilled_cheese_sandwich", "grilled_salmon", "guacamole", "gyoza", "hamburger", "hot_and_sour_soup", "hot_dog", "huevos_rancheros", "hummus", "ice_cream", "beignets", "lasagna", "lobster_bisque", "lobster_roll_sandwich", "macaroni_and_cheese", "macarons", "miso_soup", "mussels", "nachos", "omelette", "onion_rings", "bibimbap", "oysters", "pad_thai", "paella", "pancakes", "panna_cotta", "peking_duck", "pho", "pizza", "pork_chop", "poutine", "boeuf_bourguignon", "prime_rib", "pulled_pork_sandwich", "ramen", "ravioli", "red_velvet_cake", "risotto", "samosa", "sashimi", "scallops", "seaweed_salad", "bread_pudding", "shrimp_and_grits", "spaghetti_bolognese", "spaghetti_carbonara", "spring_rolls", "steak", "strawberry_shortcake", "sushi", "tacos", "takoyaki", "tiramisu" ]
Karelito00/swin-tiny-patch4-window7-224-finetuned-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-eurosat This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.0501 - Accuracy: 0.9822 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.3259 | 1.0 | 379 | 0.0760 | 0.9763 | | 0.1882 | 2.0 | 758 | 0.0694 | 0.9778 | | 0.1563 | 3.0 | 1137 | 0.0501 | 0.9822 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
[ "annualcrop", "forest", "herbaceousvegetation", "highway", "industrial", "pasture", "permanentcrop", "residential", "river", "sealake" ]
sergiocannata/dit-base-finetuned-brs
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # dit-base-finetuned-brs This model is a fine-tuned version of [microsoft/dit-base](https://huggingface.co/microsoft/dit-base) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.8748 - Accuracy: 0.8824 - F1: 0.8571 - Precision (ppv): 0.8571 - Recall (sensitivity): 0.8571 - Specificity: 0.9 - Npv: 0.9 - Auc: 0.8786 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 100 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision (ppv) | Recall (sensitivity) | Specificity | Npv | Auc | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:---------------:|:--------------------:|:-----------:|:------:|:------:| | 0.6624 | 6.25 | 100 | 0.5548 | 0.8235 | 0.7692 | 0.8333 | 0.7143 | 0.9 | 0.8182 | 0.8071 | | 0.5201 | 12.49 | 200 | 0.4617 | 0.8824 | 0.8571 | 0.8571 | 0.8571 | 0.9 | 0.9 | 0.8786 | | 0.5172 | 18.74 | 300 | 0.4249 | 0.8235 | 0.8000 | 0.75 | 0.8571 | 0.8 | 0.8889 | 0.8286 | | 0.4605 | 24.98 | 400 | 0.3172 | 0.8235 | 0.7692 | 0.8333 | 0.7143 | 0.9 | 0.8182 | 0.8071 | | 0.4894 | 31.25 | 500 | 0.4466 | 0.8235 | 0.7692 | 0.8333 | 0.7143 | 0.9 | 0.8182 | 0.8071 | | 0.3694 | 37.49 | 600 | 0.5077 | 0.8235 | 0.7692 | 0.8333 | 0.7143 | 0.9 | 0.8182 | 0.8071 | | 0.6172 | 43.74 | 700 | 0.5722 | 0.7647 | 0.7143 | 0.7143 | 0.7143 | 0.8 | 0.8 | 0.7571 | | 0.3671 | 49.98 | 800 | 0.7006 | 0.7647 | 0.6667 | 0.8 | 0.5714 | 0.9 | 0.75 | 0.7357 | | 0.4109 | 56.25 | 900 | 0.4410 | 0.8235 | 0.7692 | 0.8333 | 0.7143 | 0.9 | 0.8182 | 0.8071 | | 0.3198 | 62.49 | 1000 | 0.7226 | 0.8235 | 0.7692 | 0.8333 | 0.7143 | 0.9 | 0.8182 | 0.8071 | | 0.4283 | 68.74 | 1100 | 0.8089 | 0.8235 | 0.7692 | 0.8333 | 0.7143 | 0.9 | 0.8182 | 0.8071 | | 0.3273 | 74.98 | 1200 | 0.9059 | 0.7647 | 0.6667 | 0.8 | 0.5714 | 0.9 | 0.75 | 0.7357 | | 0.3237 | 81.25 | 1300 | 0.8520 | 0.8235 | 0.7692 | 0.8333 | 0.7143 | 0.9 | 0.8182 | 0.8071 | | 0.2014 | 87.49 | 1400 | 0.9183 | 0.7647 | 0.6667 | 0.8 | 0.5714 | 0.9 | 0.75 | 0.7357 | | 0.3204 | 93.74 | 1500 | 0.6769 | 0.8824 | 0.8571 | 0.8571 | 0.8571 | 0.9 | 0.9 | 0.8786 | | 0.1786 | 99.98 | 1600 | 0.8748 | 0.8824 | 0.8571 | 0.8571 | 0.8571 | 0.9 | 0.9 | 0.8786 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
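The less common columns in this card (PPV, NPV, specificity) all derive from the binary confusion matrix. The sketch below uses illustrative counts that are consistent with the final reported metrics (tp=6, fn=1, tn=9, fp=1 over 17 evaluation images); these counts are inferred from the scores, not stated in the card:

```python
# Confusion-matrix counts (illustrative, consistent with the reported metrics).
tp, fn, tn, fp = 6, 1, 9, 1

accuracy = (tp + tn) / (tp + tn + fp + fn)        # 15/17 ~ 0.8824
sensitivity = tp / (tp + fn)                      # recall:    6/7  ~ 0.8571
specificity = tn / (tn + fp)                      #            9/10 = 0.9
ppv = tp / (tp + fp)                              # precision: 6/7  ~ 0.8571
npv = tn / (tn + fn)                              #            9/10 = 0.9
f1 = 2 * ppv * sensitivity / (ppv + sensitivity)  # ~ 0.8571

print(accuracy, sensitivity, specificity, ppv, npv, f1)
```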
[ "event", "no_event" ]
Alex-VisTas/swin-tiny-patch4-window7-224-finetuned-woody_130epochs
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-woody_130epochs This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.4550 - Accuracy: 0.8921 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 130 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.6694 | 1.0 | 58 | 0.6370 | 0.6594 | | 0.6072 | 2.0 | 116 | 0.5813 | 0.7030 | | 0.6048 | 3.0 | 174 | 0.5646 | 0.7030 | | 0.5849 | 4.0 | 232 | 0.5778 | 0.6970 | | 0.5671 | 5.0 | 290 | 0.5394 | 0.7236 | | 0.5575 | 6.0 | 348 | 0.5212 | 0.7382 | | 0.568 | 7.0 | 406 | 0.5218 | 0.7358 | | 0.5607 | 8.0 | 464 | 0.5183 | 0.7527 | | 0.5351 | 9.0 | 522 | 0.5138 | 0.7467 | | 0.5459 | 10.0 | 580 | 0.5290 | 0.7394 | | 0.5454 | 11.0 | 638 | 0.5212 | 0.7345 | | 0.5291 | 12.0 | 696 | 0.5130 | 0.7576 | | 0.5378 | 13.0 | 754 | 0.5372 | 0.7503 | | 0.5264 | 14.0 | 812 | 0.6089 | 0.6861 | | 0.4909 | 15.0 | 870 | 0.4852 | 0.7636 | | 0.5591 | 16.0 | 928 | 0.4817 | 0.76 | | 0.4966 | 17.0 | 986 | 0.5673 | 0.6933 | | 0.4988 | 18.0 | 1044 | 0.5131 | 0.7418 | | 0.5339 | 19.0 | 1102 | 0.4998 | 0.7394 | | 0.4804 | 20.0 | 1160 | 0.4655 | 0.7733 | | 0.503 | 21.0 | 1218 | 0.4554 | 0.7685 | | 0.4859 | 22.0 | 1276 | 0.4713 | 0.7770 | | 0.504 | 23.0 | 1334 | 0.4545 | 0.7721 | | 0.478 | 24.0 | 1392 | 0.4658 | 0.7830 | | 0.4759 | 25.0 | 1450 | 0.4365 | 0.8012 | | 0.4686 | 26.0 | 1508 | 0.4452 | 0.7855 | | 0.4668 | 27.0 | 1566 | 0.4427 | 0.7879 | | 0.4615 | 28.0 | 1624 | 0.4439 | 0.7685 | | 0.4588 | 29.0 | 1682 | 0.4378 | 0.7830 | | 0.4588 | 30.0 | 1740 | 0.4229 | 0.7988 | | 0.4296 | 31.0 | 1798 | 0.4188 | 0.7976 | | 0.4208 | 32.0 | 1856 | 0.4316 | 0.7891 | | 0.4481 | 33.0 | 1914 | 0.4331 | 0.7891 | | 0.4253 | 34.0 | 1972 | 0.4524 | 0.7879 | | 0.4117 | 35.0 | 2030 | 0.4570 | 0.7952 | | 0.4405 | 36.0 | 2088 | 0.4307 | 0.7927 | | 0.4154 | 37.0 | 2146 | 0.4257 | 0.8024 | | 0.3962 | 38.0 | 2204 | 0.5077 | 0.7818 | | 0.414 | 39.0 | 2262 | 0.4602 | 0.8012 | | 0.3937 | 40.0 | 2320 | 0.4741 | 0.7770 | | 0.4186 | 41.0 | 2378 | 0.4250 | 0.8 | | 0.4076 | 42.0 | 2436 | 0.4353 | 0.7988 | | 0.3777 | 43.0 | 2494 | 0.4442 | 0.7879 | | 0.3968 | 44.0 | 2552 | 0.4525 | 0.7879 | | 0.377 | 45.0 | 2610 | 0.4198 | 0.7988 | | 0.378 | 46.0 | 2668 | 0.4297 | 0.8097 | | 0.3675 | 47.0 | 2726 | 0.4435 | 0.8085 | | 0.3562 | 48.0 | 2784 | 0.4477 | 0.7952 | | 0.381 | 49.0 | 2842 | 0.4206 | 0.8255 | | 0.3603 | 50.0 | 2900 | 0.4136 | 0.8109 | | 0.3331 | 51.0 | 2958 | 0.4141 | 0.8230 | | 0.3471 | 52.0 | 3016 | 0.4253 | 0.8109 | | 0.346 | 53.0 | 3074 | 0.5203 | 0.8048 | | 0.3481 | 54.0 | 3132 | 0.4288 | 0.8242 | | 0.3411 | 55.0 | 3190 | 0.4416 | 0.8194 | | 0.3275 | 56.0 | 3248 | 0.4149 | 
0.8291 | | 0.3067 | 57.0 | 3306 | 0.4623 | 0.8218 | | 0.3166 | 58.0 | 3364 | 0.4432 | 0.8255 | | 0.3294 | 59.0 | 3422 | 0.4599 | 0.8267 | | 0.3146 | 60.0 | 3480 | 0.4266 | 0.8291 | | 0.3091 | 61.0 | 3538 | 0.4318 | 0.8315 | | 0.3277 | 62.0 | 3596 | 0.4252 | 0.8242 | | 0.296 | 63.0 | 3654 | 0.4332 | 0.8436 | | 0.3241 | 64.0 | 3712 | 0.4729 | 0.8194 | | 0.3104 | 65.0 | 3770 | 0.4228 | 0.8448 | | 0.2878 | 66.0 | 3828 | 0.4173 | 0.8388 | | 0.265 | 67.0 | 3886 | 0.4210 | 0.8497 | | 0.3011 | 68.0 | 3944 | 0.4276 | 0.8436 | | 0.2861 | 69.0 | 4002 | 0.4923 | 0.8315 | | 0.2994 | 70.0 | 4060 | 0.4472 | 0.8182 | | 0.276 | 71.0 | 4118 | 0.4541 | 0.8315 | | 0.2796 | 72.0 | 4176 | 0.4218 | 0.8521 | | 0.2727 | 73.0 | 4234 | 0.4053 | 0.8448 | | 0.255 | 74.0 | 4292 | 0.4356 | 0.8376 | | 0.276 | 75.0 | 4350 | 0.4193 | 0.8436 | | 0.261 | 76.0 | 4408 | 0.4484 | 0.8533 | | 0.2416 | 77.0 | 4466 | 0.4722 | 0.8194 | | 0.2602 | 78.0 | 4524 | 0.4431 | 0.8533 | | 0.2591 | 79.0 | 4582 | 0.4269 | 0.8606 | | 0.2613 | 80.0 | 4640 | 0.4335 | 0.8485 | | 0.2555 | 81.0 | 4698 | 0.4269 | 0.8594 | | 0.2832 | 82.0 | 4756 | 0.3968 | 0.8715 | | 0.264 | 83.0 | 4814 | 0.4173 | 0.8703 | | 0.2462 | 84.0 | 4872 | 0.4150 | 0.8606 | | 0.2424 | 85.0 | 4930 | 0.4377 | 0.8630 | | 0.2574 | 86.0 | 4988 | 0.4120 | 0.8679 | | 0.2273 | 87.0 | 5046 | 0.4393 | 0.8533 | | 0.2334 | 88.0 | 5104 | 0.4366 | 0.8630 | | 0.2258 | 89.0 | 5162 | 0.4189 | 0.8630 | | 0.2153 | 90.0 | 5220 | 0.4474 | 0.8630 | | 0.2462 | 91.0 | 5278 | 0.4362 | 0.8642 | | 0.2356 | 92.0 | 5336 | 0.4454 | 0.8715 | | 0.2019 | 93.0 | 5394 | 0.4413 | 0.88 | | 0.209 | 94.0 | 5452 | 0.4410 | 0.8703 | | 0.2201 | 95.0 | 5510 | 0.4323 | 0.8691 | | 0.2245 | 96.0 | 5568 | 0.4999 | 0.8618 | | 0.2178 | 97.0 | 5626 | 0.4612 | 0.8655 | | 0.2163 | 98.0 | 5684 | 0.4340 | 0.8703 | | 0.2228 | 99.0 | 5742 | 0.4504 | 0.8788 | | 0.2151 | 100.0 | 5800 | 0.4602 | 0.8703 | | 0.1988 | 101.0 | 5858 | 0.4414 | 0.8812 | | 0.2227 | 102.0 | 5916 | 0.4392 | 0.8824 | | 0.1772 | 103.0 | 5974 | 0.5069 | 0.8630 | | 0.2199 | 104.0 | 6032 | 0.4648 | 0.8667 | | 0.1936 | 105.0 | 6090 | 0.4806 | 0.8691 | | 0.199 | 106.0 | 6148 | 0.4569 | 0.8764 | | 0.2149 | 107.0 | 6206 | 0.4445 | 0.8739 | | 0.1917 | 108.0 | 6264 | 0.4444 | 0.8727 | | 0.201 | 109.0 | 6322 | 0.4594 | 0.8727 | | 0.1938 | 110.0 | 6380 | 0.4564 | 0.8764 | | 0.1977 | 111.0 | 6438 | 0.4398 | 0.8739 | | 0.1776 | 112.0 | 6496 | 0.4356 | 0.88 | | 0.1939 | 113.0 | 6554 | 0.4412 | 0.8848 | | 0.178 | 114.0 | 6612 | 0.4373 | 0.88 | | 0.1926 | 115.0 | 6670 | 0.4508 | 0.8812 | | 0.1979 | 116.0 | 6728 | 0.4477 | 0.8848 | | 0.1958 | 117.0 | 6786 | 0.4488 | 0.8897 | | 0.189 | 118.0 | 6844 | 0.4553 | 0.8836 | | 0.1838 | 119.0 | 6902 | 0.4605 | 0.8848 | | 0.1755 | 120.0 | 6960 | 0.4463 | 0.8836 | | 0.1958 | 121.0 | 7018 | 0.4474 | 0.8861 | | 0.1857 | 122.0 | 7076 | 0.4550 | 0.8921 | | 0.1466 | 123.0 | 7134 | 0.4494 | 0.8885 | | 0.1751 | 124.0 | 7192 | 0.4560 | 0.8873 | | 0.175 | 125.0 | 7250 | 0.4383 | 0.8897 | | 0.207 | 126.0 | 7308 | 0.4601 | 0.8873 | | 0.1756 | 127.0 | 7366 | 0.4425 | 0.8897 | | 0.1695 | 128.0 | 7424 | 0.4533 | 0.8909 | | 0.1873 | 129.0 | 7482 | 0.4510 | 0.8897 | | 0.1726 | 130.0 | 7540 | 0.4463 | 0.8909 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
[ "normal", "woody" ]
Karelito00/beit-base-patch16-224-pt22k-ft22k-finetuned-mnist
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # beit-base-patch16-224-pt22k-ft22k-finetuned-mnist This model is a fine-tuned version of [microsoft/beit-base-patch16-224-pt22k-ft22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k-ft22k) on the mnist dataset. It achieves the following results on the evaluation set: - Loss: 0.0202 - Accuracy: 0.9935 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.3376 | 1.0 | 937 | 0.0446 | 0.9855 | | 0.318 | 2.0 | 1874 | 0.0262 | 0.9916 | | 0.2374 | 3.0 | 2811 | 0.0202 | 0.9935 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
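One practical note when reusing this checkpoint: MNIST digits are 28x28 grayscale, while BEiT expects 224x224 RGB input. The bundled feature extractor handles resizing and normalization, but the channel conversion should be done explicitly. A minimal sketch, assuming the repository id above:

```python
from datasets import load_dataset
from transformers import AutoFeatureExtractor, AutoModelForImageClassification

model_id = "Karelito00/beit-base-patch16-224-pt22k-ft22k-finetuned-mnist"
extractor = AutoFeatureExtractor.from_pretrained(model_id)
model = AutoModelForImageClassification.from_pretrained(model_id)

sample = load_dataset("mnist", split="test[:1]")[0]["image"]
inputs = extractor(images=sample.convert("RGB"), return_tensors="pt")  # grayscale -> 3 channels
pred = model(**inputs).logits.argmax(-1).item()
print(model.config.id2label[pred])
```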
[ "0", "1", "2", "3", "4", "5", "6", "7", "8", "9" ]
PKR/swin-tiny-patch4-window7-224-finetuned-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-eurosat This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.0593 - Accuracy: 0.9815 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.2731 | 1.0 | 190 | 0.1128 | 0.9637 | | 0.1862 | 2.0 | 380 | 0.0759 | 0.9759 | | 0.1409 | 3.0 | 570 | 0.0593 | 0.9815 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
[ "annualcrop", "forest", "herbaceousvegetation", "highway", "industrial", "pasture", "permanentcrop", "residential", "river", "sealake" ]
PKR/resnet-50-finetuned-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # resnet-50-finetuned-eurosat This model is a fine-tuned version of [microsoft/resnet-50](https://huggingface.co/microsoft/resnet-50) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 1.7123 - Accuracy: 0.5630 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.7579 | 1.0 | 190 | 1.7123 | 0.5630 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
[ "annualcrop", "forest", "herbaceousvegetation", "highway", "industrial", "pasture", "permanentcrop", "residential", "river", "sealake" ]
Alex-VisTas/swin-tiny-patch4-window7-224-finetuned-woody_LeftGR_130epochs
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-woody_LeftGR_130epochs This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.3377 - Accuracy: 0.9047 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 130 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.6614 | 1.0 | 61 | 0.6404 | 0.6521 | | 0.5982 | 2.0 | 122 | 0.5548 | 0.7107 | | 0.579 | 3.0 | 183 | 0.5390 | 0.7141 | | 0.5621 | 4.0 | 244 | 0.4920 | 0.7623 | | 0.5567 | 5.0 | 305 | 0.5375 | 0.7313 | | 0.5271 | 6.0 | 366 | 0.5542 | 0.7405 | | 0.5312 | 7.0 | 427 | 0.4573 | 0.7876 | | 0.5477 | 8.0 | 488 | 0.4540 | 0.7784 | | 0.5554 | 9.0 | 549 | 0.4932 | 0.7635 | | 0.5247 | 10.0 | 610 | 0.4407 | 0.7968 | | 0.5239 | 11.0 | 671 | 0.4479 | 0.7842 | | 0.5294 | 12.0 | 732 | 0.4509 | 0.7910 | | 0.531 | 13.0 | 793 | 0.4419 | 0.7933 | | 0.5493 | 14.0 | 854 | 0.4646 | 0.7784 | | 0.4934 | 15.0 | 915 | 0.4310 | 0.7968 | | 0.4965 | 16.0 | 976 | 0.4449 | 0.7876 | | 0.4946 | 17.0 | 1037 | 0.4342 | 0.8129 | | 0.4716 | 18.0 | 1098 | 0.4129 | 0.8140 | | 0.4679 | 19.0 | 1159 | 0.4290 | 0.8002 | | 0.4799 | 20.0 | 1220 | 0.4356 | 0.7842 | | 0.4744 | 21.0 | 1281 | 0.4042 | 0.8094 | | 0.4512 | 22.0 | 1342 | 0.3953 | 0.8117 | | 0.4633 | 23.0 | 1403 | 0.4157 | 0.7956 | | 0.4528 | 24.0 | 1464 | 0.3920 | 0.8094 | | 0.4427 | 25.0 | 1525 | 0.3930 | 0.8220 | | 0.4238 | 26.0 | 1586 | 0.3891 | 0.8140 | | 0.4257 | 27.0 | 1647 | 0.3700 | 0.8255 | | 0.4102 | 28.0 | 1708 | 0.4122 | 0.7968 | | 0.4505 | 29.0 | 1769 | 0.4210 | 0.7945 | | 0.3973 | 30.0 | 1830 | 0.3923 | 0.8197 | | 0.3824 | 31.0 | 1891 | 0.3908 | 0.8473 | | 0.3887 | 32.0 | 1952 | 0.3897 | 0.8312 | | 0.3723 | 33.0 | 2013 | 0.3747 | 0.8381 | | 0.3608 | 34.0 | 2074 | 0.3706 | 0.8301 | | 0.3718 | 35.0 | 2135 | 0.3937 | 0.8255 | | 0.3692 | 36.0 | 2196 | 0.3984 | 0.8037 | | 0.3533 | 37.0 | 2257 | 0.3792 | 0.8335 | | 0.3625 | 38.0 | 2318 | 0.4070 | 0.8163 | | 0.3633 | 39.0 | 2379 | 0.4130 | 0.8232 | | 0.3602 | 40.0 | 2440 | 0.3996 | 0.8186 | | 0.3557 | 41.0 | 2501 | 0.3756 | 0.8335 | | 0.3373 | 42.0 | 2562 | 0.3914 | 0.8220 | | 0.3102 | 43.0 | 2623 | 0.4165 | 0.8507 | | 0.3135 | 44.0 | 2684 | 0.3852 | 0.8278 | | 0.3286 | 45.0 | 2745 | 0.4164 | 0.8450 | | 0.316 | 46.0 | 2806 | 0.3498 | 0.8496 | | 0.2802 | 47.0 | 2867 | 0.3887 | 0.8462 | | 0.3184 | 48.0 | 2928 | 0.3829 | 0.8576 | | 0.2785 | 49.0 | 2989 | 0.3627 | 0.8485 | | 0.2988 | 50.0 | 3050 | 0.3679 | 0.8370 | | 0.267 | 51.0 | 3111 | 0.3528 | 0.8645 | | 0.2907 | 52.0 | 3172 | 0.3538 | 0.8519 | | 0.2857 | 53.0 | 3233 | 0.3593 | 0.8530 | | 0.2651 | 54.0 | 3294 | 0.3732 | 0.8439 | | 0.2447 | 55.0 | 3355 | 0.3441 | 0.8542 | | 0.2542 | 56.0 | 
3416 | 0.3897 | 0.8576 | | 0.2634 | 57.0 | 3477 | 0.4082 | 0.8657 | | 0.2505 | 58.0 | 3538 | 0.3416 | 0.8657 | | 0.2555 | 59.0 | 3599 | 0.3725 | 0.8576 | | 0.2466 | 60.0 | 3660 | 0.3496 | 0.8680 | | 0.2585 | 61.0 | 3721 | 0.3214 | 0.8783 | | 0.235 | 62.0 | 3782 | 0.3584 | 0.8737 | | 0.215 | 63.0 | 3843 | 0.3467 | 0.8657 | | 0.236 | 64.0 | 3904 | 0.3471 | 0.8829 | | 0.2211 | 65.0 | 3965 | 0.3318 | 0.8863 | | 0.1989 | 66.0 | 4026 | 0.3645 | 0.8852 | | 0.2133 | 67.0 | 4087 | 0.3456 | 0.8898 | | 0.2169 | 68.0 | 4148 | 0.3287 | 0.8852 | | 0.223 | 69.0 | 4209 | 0.3182 | 0.8921 | | 0.2379 | 70.0 | 4270 | 0.3260 | 0.8840 | | 0.2149 | 71.0 | 4331 | 0.3230 | 0.8886 | | 0.2007 | 72.0 | 4392 | 0.3926 | 0.8760 | | 0.2091 | 73.0 | 4453 | 0.4133 | 0.8783 | | 0.2229 | 74.0 | 4514 | 0.3867 | 0.8772 | | 0.1903 | 75.0 | 4575 | 0.3594 | 0.8840 | | 0.2124 | 76.0 | 4636 | 0.3388 | 0.8875 | | 0.1999 | 77.0 | 4697 | 0.3305 | 0.8875 | | 0.2053 | 78.0 | 4758 | 0.4670 | 0.8840 | | 0.1958 | 79.0 | 4819 | 0.3468 | 0.8909 | | 0.1839 | 80.0 | 4880 | 0.3902 | 0.8886 | | 0.1715 | 81.0 | 4941 | 0.3830 | 0.8875 | | 0.1803 | 82.0 | 5002 | 0.3134 | 0.8967 | | 0.1803 | 83.0 | 5063 | 0.3935 | 0.8909 | | 0.1865 | 84.0 | 5124 | 0.3882 | 0.8863 | | 0.1884 | 85.0 | 5185 | 0.3485 | 0.8990 | | 0.1663 | 86.0 | 5246 | 0.3667 | 0.8944 | | 0.1665 | 87.0 | 5307 | 0.3545 | 0.8932 | | 0.1556 | 88.0 | 5368 | 0.3882 | 0.8944 | | 0.18 | 89.0 | 5429 | 0.3751 | 0.8898 | | 0.1974 | 90.0 | 5490 | 0.3979 | 0.8863 | | 0.1622 | 91.0 | 5551 | 0.3623 | 0.8967 | | 0.1657 | 92.0 | 5612 | 0.3855 | 0.8978 | | 0.1672 | 93.0 | 5673 | 0.3722 | 0.8944 | | 0.1807 | 94.0 | 5734 | 0.3994 | 0.8932 | | 0.1419 | 95.0 | 5795 | 0.4017 | 0.8863 | | 0.178 | 96.0 | 5856 | 0.4168 | 0.8886 | | 0.1402 | 97.0 | 5917 | 0.3727 | 0.8944 | | 0.1427 | 98.0 | 5978 | 0.3919 | 0.8967 | | 0.1318 | 99.0 | 6039 | 0.3843 | 0.8955 | | 0.1417 | 100.0 | 6100 | 0.4017 | 0.8898 | | 0.1536 | 101.0 | 6161 | 0.3613 | 0.8955 | | 0.1631 | 102.0 | 6222 | 0.3377 | 0.9047 | | 0.1459 | 103.0 | 6283 | 0.3724 | 0.8967 | | 0.1499 | 104.0 | 6344 | 0.3934 | 0.8955 | | 0.1572 | 105.0 | 6405 | 0.3368 | 0.8967 | | 0.1308 | 106.0 | 6466 | 0.3782 | 0.8990 | | 0.1535 | 107.0 | 6527 | 0.3306 | 0.9024 | | 0.125 | 108.0 | 6588 | 0.4076 | 0.8898 | | 0.1339 | 109.0 | 6649 | 0.3628 | 0.8990 | | 0.148 | 110.0 | 6710 | 0.3672 | 0.9013 | | 0.1725 | 111.0 | 6771 | 0.4006 | 0.8909 | | 0.1326 | 112.0 | 6832 | 0.4117 | 0.8921 | | 0.1438 | 113.0 | 6893 | 0.3927 | 0.8978 | | 0.1205 | 114.0 | 6954 | 0.3612 | 0.8990 | | 0.1531 | 115.0 | 7015 | 0.3594 | 0.8932 | | 0.1473 | 116.0 | 7076 | 0.4490 | 0.8875 | | 0.1388 | 117.0 | 7137 | 0.3952 | 0.8921 | | 0.136 | 118.0 | 7198 | 0.4098 | 0.8921 | | 0.1579 | 119.0 | 7259 | 0.3595 | 0.9013 | | 0.1359 | 120.0 | 7320 | 0.3970 | 0.8944 | | 0.1314 | 121.0 | 7381 | 0.4092 | 0.8932 | | 0.1337 | 122.0 | 7442 | 0.4192 | 0.8909 | | 0.1538 | 123.0 | 7503 | 0.4154 | 0.8898 | | 0.119 | 124.0 | 7564 | 0.4120 | 0.8909 | | 0.1353 | 125.0 | 7625 | 0.4060 | 0.8921 | | 0.1489 | 126.0 | 7686 | 0.4162 | 0.8909 | | 0.1554 | 127.0 | 7747 | 0.4148 | 0.8944 | | 0.1558 | 128.0 | 7808 | 0.4169 | 0.8944 | | 0.1268 | 129.0 | 7869 | 0.4110 | 0.8955 | | 0.1236 | 130.0 | 7930 | 0.4197 | 0.8944 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
[ "normal", "woody" ]
echarlaix/vit-food101-int8
## [Vision Transformer (ViT)](https://huggingface.co/juliensimon/autotrain-food101-1471154050) quantized and exported to the OpenVINO IR. ## Model Details **Model Description:** This ViT model fine-tuned on Food-101 was statically quantized and exported to the OpenVINO IR using [optimum](https://huggingface.co/docs/optimum/intel/optimization_ov). ## Usage example You can use this model with Transformers *pipeline*.
```python
from transformers import pipeline, AutoFeatureExtractor
from optimum.intel.openvino import OVModelForImageClassification

model_id = "echarlaix/vit-food101-int8"
model = OVModelForImageClassification.from_pretrained(model_id)
feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
pipe = pipeline("image-classification", model=model, feature_extractor=feature_extractor)
outputs = pipe("http://farm2.staticflickr.com/1375/1394861946_171ea43524_z.jpg")
```
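A note on the design choice, as a general observation rather than anything stated in the card: static post-training quantization converts weights and pre-calibrated activation ranges to INT8 ahead of time, which typically shrinks the model roughly 4x and speeds up CPU inference at the cost of a small accuracy drop relative to the FP32 ViT it was derived from; the exact accuracy delta is not reported here.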
[ "apple_pie", "baby_back_ribs", "baklava", "beef_carpaccio", "beef_tartare", "beet_salad", "beignets", "bibimbap", "bread_pudding", "breakfast_burrito", "bruschetta", "caesar_salad", "cannoli", "caprese_salad", "carrot_cake", "ceviche", "cheese_plate", "cheesecake", "chicken_curry", "chicken_quesadilla", "chicken_wings", "chocolate_cake", "chocolate_mousse", "churros", "clam_chowder", "club_sandwich", "crab_cakes", "creme_brulee", "croque_madame", "cup_cakes", "deviled_eggs", "donuts", "dumplings", "edamame", "eggs_benedict", "escargots", "falafel", "filet_mignon", "fish_and_chips", "foie_gras", "french_fries", "french_onion_soup", "french_toast", "fried_calamari", "fried_rice", "frozen_yogurt", "garlic_bread", "gnocchi", "greek_salad", "grilled_cheese_sandwich", "grilled_salmon", "guacamole", "gyoza", "hamburger", "hot_and_sour_soup", "hot_dog", "huevos_rancheros", "hummus", "ice_cream", "lasagna", "lobster_bisque", "lobster_roll_sandwich", "macaroni_and_cheese", "macarons", "miso_soup", "mussels", "nachos", "omelette", "onion_rings", "oysters", "pad_thai", "paella", "pancakes", "panna_cotta", "peking_duck", "pho", "pizza", "pork_chop", "poutine", "prime_rib", "pulled_pork_sandwich", "ramen", "ravioli", "red_velvet_cake", "risotto", "samosa", "sashimi", "scallops", "seaweed_salad", "shrimp_and_grits", "spaghetti_bolognese", "spaghetti_carbonara", "spring_rolls", "steak", "strawberry_shortcake", "sushi", "tacos", "takoyaki", "tiramisu", "tuna_tartare", "waffles" ]
vicm0r/swin-tiny-patch4-window7-224-finetuned-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-eurosat This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.0715 - Accuracy: 0.9763 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.2446 | 1.0 | 190 | 0.1185 | 0.9604 | | 0.166 | 2.0 | 380 | 0.0715 | 0.9763 | | 0.127 | 3.0 | 570 | 0.0663 | 0.9763 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
[ "annualcrop", "forest", "herbaceousvegetation", "highway", "industrial", "pasture", "permanentcrop", "residential", "river", "sealake" ]
amyeroberts/vit-base-beans
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-beans This model is a fine-tuned version of [timm/resnet18.a1_in1k](https://huggingface.co/timm/resnet18.a1_in1k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 1.0324 - Accuracy: 0.6917 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 1337 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.0884 | 1.0 | 130 | 1.0903 | 0.4060 | | 1.0721 | 2.0 | 260 | 1.0681 | 0.5188 | | 1.0623 | 3.0 | 390 | 1.0460 | 0.6391 | | 1.052 | 4.0 | 520 | 1.0410 | 0.6165 | | 1.0519 | 5.0 | 650 | 1.0324 | 0.6917 | ### Framework versions - Transformers 4.46.0.dev0 - Pytorch 2.4.0 - Datasets 2.15.1.dev0 - Tokenizers 0.20.0
[ "angular_leaf_spot", "bean_rust", "healthy" ]
christyli/vit-base-beans
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-beans This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.3930 - Accuracy: 0.9774 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 1337 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.0349 | 1.0 | 17 | 0.8167 | 0.9323 | | 0.7502 | 2.0 | 34 | 0.6188 | 0.9699 | | 0.5508 | 3.0 | 51 | 0.4856 | 0.9774 | | 0.4956 | 4.0 | 68 | 0.4109 | 0.9774 | | 0.4261 | 5.0 | 85 | 0.3930 | 0.9774 | ### Framework versions - Transformers 4.22.0.dev0 - Pytorch 1.12.1+cu102 - Tokenizers 0.12.1
[ "angular_leaf_spot", "bean_rust", "healthy" ]
sergiocannata/convnext-tiny-224-finetuned-brs
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # convnext-tiny-224-finetuned-brs This model is a fine-tuned version of [facebook/convnext-tiny-224](https://huggingface.co/facebook/convnext-tiny-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.8667 - Accuracy: 0.8235 - F1: 0.7273 - Precision (ppv): 0.8 - Recall (sensitivity): 0.6667 - Specificity: 0.9091 - Npv: 0.8333 - Auc: 0.7879 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 100 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision (ppv) | Recall (sensitivity) | Specificity | Npv | Auc | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:---------------:|:--------------------:|:-----------:|:------:|:------:| | 0.6766 | 6.25 | 100 | 0.7002 | 0.4706 | 0.5263 | 0.3846 | 0.8333 | 0.2727 | 0.75 | 0.5530 | | 0.6408 | 12.49 | 200 | 0.6770 | 0.6471 | 0.5714 | 0.5 | 0.6667 | 0.6364 | 0.7778 | 0.6515 | | 0.464 | 18.74 | 300 | 0.6624 | 0.5882 | 0.5882 | 0.4545 | 0.8333 | 0.4545 | 0.8333 | 0.6439 | | 0.4295 | 24.98 | 400 | 0.6938 | 0.5294 | 0.5 | 0.4 | 0.6667 | 0.4545 | 0.7143 | 0.5606 | | 0.3952 | 31.25 | 500 | 0.5974 | 0.7059 | 0.6154 | 0.5714 | 0.6667 | 0.7273 | 0.8 | 0.6970 | | 0.1082 | 37.49 | 600 | 0.6163 | 0.6471 | 0.5 | 0.5 | 0.5 | 0.7273 | 0.7273 | 0.6136 | | 0.1997 | 43.74 | 700 | 0.6155 | 0.7059 | 0.6154 | 0.5714 | 0.6667 | 0.7273 | 0.8 | 0.6970 | | 0.1267 | 49.98 | 800 | 0.9063 | 0.6471 | 0.5714 | 0.5 | 0.6667 | 0.6364 | 0.7778 | 0.6515 | | 0.1178 | 56.25 | 900 | 0.8672 | 0.7059 | 0.6667 | 0.5556 | 0.8333 | 0.6364 | 0.875 | 0.7348 | | 0.2008 | 62.49 | 1000 | 0.7049 | 0.8235 | 0.7692 | 0.7143 | 0.8333 | 0.8182 | 0.9 | 0.8258 | | 0.0996 | 68.74 | 1100 | 0.4510 | 0.8235 | 0.7692 | 0.7143 | 0.8333 | 0.8182 | 0.9 | 0.8258 | | 0.0115 | 74.98 | 1200 | 0.7561 | 0.8235 | 0.7692 | 0.7143 | 0.8333 | 0.8182 | 0.9 | 0.8258 | | 0.0177 | 81.25 | 1300 | 1.0400 | 0.7059 | 0.6667 | 0.5556 | 0.8333 | 0.6364 | 0.875 | 0.7348 | | 0.0261 | 87.49 | 1400 | 0.9139 | 0.8235 | 0.7692 | 0.7143 | 0.8333 | 0.8182 | 0.9 | 0.8258 | | 0.028 | 93.74 | 1500 | 0.7367 | 0.7647 | 0.7143 | 0.625 | 0.8333 | 0.7273 | 0.8889 | 0.7803 | | 0.0056 | 99.98 | 1600 | 0.8667 | 0.8235 | 0.7273 | 0.8 | 0.6667 | 0.9091 | 0.8333 | 0.7879 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
[ "event", "no_event" ]
sergiocannata/VANBase-finetuned-brs
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # VANBase-finetuned-brs-finetuned-brs This model is a fine-tuned version of [Visual-Attention-Network/van-base](https://huggingface.co/Visual-Attention-Network/van-base) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.7056 - Accuracy: 0.5882 - F1: 0.6957 - Precision (ppv): 0.6154 - Recall (sensitivity): 0.8 - Specificity: 0.2857 - Npv: 0.5 - Auc: 0.5429 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 100 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision (ppv) | Recall (sensitivity) | Specificity | Npv | Auc | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:---------------:|:--------------------:|:-----------:|:------:|:------:| | 0.6589 | 6.25 | 100 | 0.6655 | 0.5882 | 0.6316 | 0.6667 | 0.6 | 0.5714 | 0.5 | 0.5857 | | 0.6262 | 12.49 | 200 | 0.6917 | 0.5294 | 0.6364 | 0.5833 | 0.7 | 0.2857 | 0.4 | 0.4929 | | 0.4706 | 18.74 | 300 | 0.6776 | 0.5882 | 0.6957 | 0.6154 | 0.8 | 0.2857 | 0.5 | 0.5429 | | 0.5202 | 24.98 | 400 | 0.7018 | 0.5294 | 0.6 | 0.6 | 0.6 | 0.4286 | 0.4286 | 0.5143 | | 0.4628 | 31.25 | 500 | 0.6903 | 0.6471 | 0.75 | 0.6429 | 0.9 | 0.2857 | 0.6667 | 0.5929 | | 0.3525 | 37.49 | 600 | 0.7241 | 0.5294 | 0.6667 | 0.5714 | 0.8 | 0.1429 | 0.3333 | 0.4714 | | 0.2877 | 43.74 | 700 | 0.8262 | 0.5882 | 0.7407 | 0.5882 | 1.0 | 0.0 | nan | 0.5 | | 0.2921 | 49.98 | 800 | 0.8058 | 0.4706 | 0.64 | 0.5333 | 0.8 | 0.0 | 0.0 | 0.4 | | 0.3834 | 56.25 | 900 | 0.7864 | 0.5882 | 0.7407 | 0.5882 | 1.0 | 0.0 | nan | 0.5 | | 0.2267 | 62.49 | 1000 | 0.5520 | 0.7647 | 0.8182 | 0.75 | 0.9 | 0.5714 | 0.8 | 0.7357 | | 0.3798 | 68.74 | 1100 | 0.8722 | 0.4706 | 0.64 | 0.5333 | 0.8 | 0.0 | 0.0 | 0.4 | | 0.2633 | 74.98 | 1200 | 0.7260 | 0.6471 | 0.7273 | 0.6667 | 0.8 | 0.4286 | 0.6 | 0.6143 | | 0.3439 | 81.25 | 1300 | 1.0187 | 0.4118 | 0.5455 | 0.5 | 0.6 | 0.1429 | 0.2 | 0.3714 | | 0.2532 | 87.49 | 1400 | 0.8812 | 0.5882 | 0.7407 | 0.5882 | 1.0 | 0.0 | nan | 0.5 | | 0.0841 | 93.74 | 1500 | 0.8717 | 0.5294 | 0.6923 | 0.5625 | 0.9 | 0.0 | 0.0 | 0.45 | | 0.3409 | 99.98 | 1600 | 0.7056 | 0.5882 | 0.6957 | 0.6154 | 0.8 | 0.2857 | 0.5 | 0.5429 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
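A note on the `nan` entries in the Npv column above: they occur exactly in the rows where the model predicted every evaluation image as positive, so tn + fn = 0 and NPV = tn / (tn + fn) is undefined. Consistently, those same rows report a recall (sensitivity) of 1.0 and a specificity of 0.0.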
[ "event", "no_event" ]
Killerw/autotrain-garry-gen1-8-1942865478
# Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 1942865478 - CO2 Emissions (in grams): 64.1784 ## Validation Metrics - Loss: 2.036 - Accuracy: 0.899 - Macro F1: 0.871 - Micro F1: 0.899 - Weighted F1: 0.878 - Macro Precision: 0.870 - Micro Precision: 0.899 - Weighted Precision: 0.877 - Macro Recall: 0.886 - Micro Recall: 0.899 - Weighted Recall: 0.899
[ "abomasnow", "abra", "alolan diglett", "bulbasaur", "buneary", "bunnelby", "burmy", "butterfree", "buzzwole", "cacnea", "cacturne", "calyrex", "camerupt", "alolan dugtrio", "carbink", "carkol", "carnivine", "carracosta", "carvanha", "cascoon", "caterpie", "celebi", "celesteela", "centiskorch", "alolan exeggutor", "chandelure", "chansey", "charizard", "charjabug", "charmander", "charmeleon", "chatot", "cherrim", "cherubi", "chesnaught", "alolan geodude", "chespin", "chewtle", "chikorita", "chimchar", "chimecho", "chinchou", "chingling", "cinccino", "cinderace", "clamperl", "alolan golem", "clauncher", "clawitzer", "claydol", "clefable", "clefairy", "cleffa", "clobbopus", "cloyster", "coalossal", "cobalion", "alolan graveler", "cofagrigus", "combee", "combusken", "comfey", "conkeldurr", "copperajah", "corphish", "corsola", "corviknight", "corvisquire", "alolan grimer", "cosmoem", "cosmog", "cottonee", "crabominable", "crabrawler", "cradily", "cramorant", "cranidos", "crawdaunt", "cresselia", "alolan marowak", "croagunk", "crobat", "croconaw", "crustle", "cryogonal", "cubchoo", "cubone", "cufant", "cursola", "cutiefly", "alolan meowth", "cyndaquil", "darkrai", "dartrix", "darumaka", "decidueye", "dedenne", "deerling", "deino", "delcatty", "delibird", "alolan muk", "delphox", "deoxys", "dewgong", "dewott", "dewpider", "dhelmise", "dialga", "diancie", "diggersby", "diglett", "absol", "alolan ninetales", "ditto", "dodrio", "doduo", "donphan", "dottler", "doublade", "dracovish", "dracozolt", "dragalge", "dragapult", "alolan persian", "dragonair", "dragonite", "drakloak", "drampa", "drapion", "dratini", "drednaw", "dreepy", "drifblim", "drifloon", "alolan raichu", "drilbur", "drizzile", "drowzee", "druddigon", "dubwool", "ducklett", "dugtrio", "dunsparce", "duosion", "duraludon", "alolan raticate", "durant", "dusclops", "dusknoir", "duskull", "dustox", "dwebble", "eelektrik", "eelektross", "eevee", "eiscue", "alolan rattata", "ekans", "eldegoss", "electabuzz", "electivire", "electrike", "electrode", "elekid", "elgyem", "emboar", "emolga", "alolan sandshrew", "empoleon", "enamorus", "entei", "escavalier", "espeon", "espurr", "eternatus", "excadrill", "exeggcute", "exeggutor", "alolan sandslash", "exploud", "falinks", "farfetch'd", "fearow", "feebas", "fennekin", "feraligatr", "ferroseed", "ferrothorn", "finneon", "alolan vulpix", "flaaffy", "flapple", "flareon", "fletchinder", "fletchling", "floatzel", "floette", "florges", "flygon", "fomantis", "alomomola", "foongus", "forretress", "fraxure", "frillish", "froakie", "frogadier", "froslass", "frosmoth", "furfrou", "furret", "altaria", "gabite", "galarian corsola", "galarian darmanitan", "galarian darumaka", "galarian farfetch'd", "galarian linoone", "galarian meowth", "galarian mr. 
mime", "galarian ponyta", "galarian rapidash", "accelgor", "amaura", "galarian stunfisk", "galarian weezing", "galarian yamask", "galarian zigzagoon", "gallade", "galvantula", "garbodor", "garchomp", "gardevoir", "gastly", "ambipom", "gastrodon", "genesect", "gengar", "geodude", "gible", "gigalith", "girafarig", "giratina", "glaceon", "glalie", "amoonguss", "glameow", "glastrier", "gligar", "gliscor", "gloom", "gogoat", "golbat", "goldeen", "golduck", "golem", "ampharos", "golett", "golisopod", "golurk", "goodra", "goomy", "gorebyss", "gossifleur", "gothita", "gothitelle", "gothorita", "anorith", "gourgeist", "granbull", "grapploct", "graveler", "greedent", "greninja", "grimer", "grimmsnarl", "grookey", "grotle", "appletun", "groudon", "grovyle", "growlithe", "grubbin", "grumpig", "gulpin", "gumshoos", "gurdurr", "guzzlord", "gyarados", "applin", "hakamo-o", "happiny", "hariyama", "hatenna", "hatterene", "hattrem", "haunter", "hawlucha", "haxorus", "heatmor", "araquanid", "heatran", "heliolisk", "helioptile", "heracross", "herdier", "hippopotas", "hippowdon", "hisuian arcanine", "hisuian avalugg", "hisuian braviary", "arbok", "hisuian decidueye", "hisuian electrode", "hisuian goodra", "hisuian growlithe", "hisuian lilligant", "hisuian qwilfish", "hisuian samurott", "hisuian sliggoo", "hisuian sneasel", "hisuian typhlosion", "arcanine", "hisuian voltorb", "hisuian zoroak", "hisuian zorua", "hitmonchan", "hitmonlee", "hitmontop", "honchkrow", "honedge", "hoopa", "hoothoot", "aegislash", "archen", "hoppip", "horsea", "houndoom", "houndour", "huntail", "hydreigon", "hypno", "igglybuff", "illumise", "impidimp", "archeops", "incineroar", "indeedee", "infernape", "inkay", "inteleon", "ivysaur", "jangmo-o", "jellicent", "jigglypuff", "jirachi", "arctovish", "jolteon", "joltik", "jumpluff", "jynx", "kabuto", "kabutops", "kadabra", "kakuna", "kangaskhan", "karrablast", "arctozolt", "kartana", "kecleon", "keldeo", "kingdra", "kingler", "kirlia", "klang", "kleavor", "klefki", "klink", "ariados", "klinklang", "koffing", "komala", "kommo-o", "krabby", "kricketot", "kricketune", "krokorok", "krookodile", "kubfu", "armaldo", "kyogre", "kyurem", "lairon", "lampent", "landorus", "lanturn", "lapras", "larvesta", "larvitar", "latias", "aromatisse", "latios", "leafeon", "leavanny", "ledian", "ledyba", "lickilicky", "lickitung", "liepard", "lileep", "lilligant", "aron", "lillipup", "linoone", "litleo", "litten", "litwick", "lombre", "lopunny", "lotad", "loudred", "lucario", "arrokuda", "ludicolo", "lugia", "lumineon", "lunala", "lunatone", "lurantis", "luvdisc", "luxio", "luxray", "lycanroc", "articuno", "machamp", "machoke", "machop", "magby", "magcargo", "magearna", "magikarp", "magmar", "magmortar", "magnemite", "aerodactyl", "audino", "magneton", "magnezone", "makuhita", "malamar", "mamoswine", "manaphy", "mandibuzz", "manectric", "mankey", "mantine", "aurorus", "mantyke", "maractus", "mareanie", "mareep", "marill", "marowak", "marshadow", "marshtomp", "masquerain", "mawile", "avalugg", "medicham", "meditite", "meganium", "melmetal", "meltan", "meowth", "mesprit", "metagross", "metang", "metapod", "axew", "mew", "mewtwo", "mienfoo", "mienshao", "mightyena", "milcery", "milotic", "miltank", "mime jr", "mimikyu", "azelf", "minccino", "minior", "minun", "misdreavus", "mismagius", "moltres", "monferno", "morelull", "morgrem", "morpeko", "azumarill", "mothim", "mr. mime", "mr. 
rime", "mudbray", "mudkip", "mudsdale", "muk", "munchlax", "munna", "murkrow", "azurill", "musharna", "naganadel", "natu", "nickit", "nidoking", "nidoqueen", "nidoran♀", "nidoran♂", "nidorina", "nidorino", "bagon", "nihilego", "nincada", "ninetales", "ninjask", "noctowl", "noibat", "noivern", "nosepass", "numel", "nuzleaf", "baltoy", "obstagoon", "octillery", "oddish", "omanyte", "omastar", "onix", "oranguru", "orbeetle", "oricorio", "oshawott", "banette", "overqwil", "pachirisu", "palkia", "palossand", "palpitoad", "pancham", "pangoro", "panpour", "pansage", "pansear", "aggron", "barbaracle", "paras", "parasect", "passimian", "patrat", "pawniard", "pelipper", "perrserker", "persian", "petilil", "phanpy", "barboach", "phantump", "pheromosa", "phione", "pichu", "pidgeot", "pidgeotto", "pidgey", "pidove", "pignite", "pikachu", "barraskewda", "pikipek", "piloswine", "pincurchin", "pineco", "pinsir", "piplup", "plusle", "poipole", "politoed", "poliwag", "basculegion", "poliwhirl", "poliwrath", "polteageist", "ponyta", "poochyena", "popplio", "porygon", "porygon2", "primarina", "primeape", "basculin", "prinplup", "probopass", "psyduck", "pumpkaboo", "pupitar", "purrloin", "purugly", "pyroar", "pyukumuku", "quagsire", "bastiodon", "quilava", "quilladin", "qwilfish", "raboot", "raichu", "raikou", "ralts", "rampardos", "rapidash", "raticate", "bayleef", "rattata", "rayquaza", "regice", "regidrago", "regieleki", "regigigas", "regirock", "registeel", "relicanth", "remoraid", "beartic", "reshiram", "reuniclus", "rhydon", "rhyhorn", "rhyperior", "ribombee", "rillaboom", "riolu", "rockruff", "roggenrola", "beautifly", "rolycoly", "rookidee", "roselia", "roserade", "rotom", "rowlet", "rufflet", "runerigus", "sableye", "salamence", "beedrill", "salandit", "salazzle", "samurott", "sandaconda", "sandile", "sandshrew", "sandslash", "sandygast", "sawk", "sawsbuck", "aipom", "beheeyem", "scatterbug", "sceptile", "scizor", "scolipede", "scorbunny", "scrafty", "scraggy", "scyther", "seadra", "seaking", "beldum", "sealeo", "seedot", "seel", "seismitoad", "sentret", "serperior", "servine", "seviper", "sewaddle", "sharpedo", "bellossom", "shaymin", "shedinja", "shelgon", "shellder", "shellos", "shelmet", "shieldon", "shiftry", "shiinotic", "shinx", "bellsprout", "shroomish", "shuckle", "shuppet", "sigilyph", "silcoon", "silicobra", "simipour", "simisage", "simisear", "sinistea", "bergmite", "sirfetch'd", "sizzlipede", "skarmory", "skiddo", "skiploom", "skitty", "skorupi", "skrelp", "skuntank", "skwovet", "bewear", "slaking", "slakoth", "sliggoo", "slowbro", "slowking", "slowpoke", "slugma", "slurpuff", "smeargle", "smoochum", "bibarel", "sneasel", "sneasler", "snivy", "snom", "snorlax", "snorunt", "snover", "snubbull", "sobble", "solgaleo", "bidoof", "solosis", "solrock", "spearow", "spectrier", "spewpa", "spheal", "spinarak", "spinda", "spiritomb", "spoink", "binacle", "spritzee", "squirtle", "stakataka", "stantler", "staraptor", "staravia", "starly", "starmie", "staryu", "steelix", "bisharp", "steenee", "stonjourner", "stoutland", "stufful", "stunfisk", "stunky", "sudowoodo", "suicune", "sunflora", "sunkern", "alakazam", "blacephalon", "surskit", "swablu", "swadloon", "swalot", "swampert", "swanna", "swellow", "swinub", "swirlix", "swoobat", "blastoise", "sylveon", "taillow", "talonflame", "tangela", "tangrowth", "tapu bulu", "tapu fini", "tapu koko", "tapu lele", "tauros", "blaziken", "teddiursa", "tentacool", "tentacruel", "tepig", "terrakion", "therian enamorus", "thievul", "throh", "thundurus", "thwackey", 
"blipbug", "timburr", "tirtouga", "togedemaru", "togekiss", "togepi", "togetic", "torchic", "torkoal", "tornadus", "torracat", "blissey", "torterra", "totodile", "toucannon", "toxapex", "toxel", "toxicroak", "toxtricity", "tranquill", "trapinch", "treecko", "blitzle", "trevenant", "tropius", "trubbish", "trumbeak", "tsareena", "turtonator", "turtwig", "tympole", "tynamo", "type_null", "boldore", "typhlosion", "tyranitar", "tyrantrum", "tyrogue", "tyrunt", "umbreon", "unfezant", "unown", "ursaluna", "urshifu", "boltund", "uxie", "vanillish", "vanillite", "vanilluxe", "vaporeon", "venipede", "venomoth", "venonat", "venusaur", "vespiquen", "bonsly", "vibrava", "victini", "victreebel", "vigoroth", "vikavolt", "vileplume", "virizion", "vivillon", "volbeat", "volcanion", "bouffalant", "volcarona", "voltorb", "vullaby", "vulpix", "wailmer", "wailord", "walrein", "wartortle", "watchog", "weavile", "alcremie", "bounsweet", "weedle", "weepinbell", "weezing", "whimsicott", "whirlipede", "whiscash", "whismur", "wigglytuff", "wimpod", "wingull", "braixen", "wishiwashi", "wobbuffet", "woobat", "wooloo", "wooper", "wormadam", "wurmple", "wynaut", "wyrdeer", "xatu", "braviary", "xerneas", "xurkitree", "yamask", "yamper", "yanma", "yanmega", "yungoos", "yveltal", "zacian", "zamazenta", "breloom", "zamazenta crowned", "zangoose", "zapdos", "zarude", "zebstrika", "zekrom", "zeraora", "zigzagoon", "zoroark", "zorua", "brionne", "zubat", "zweilous", "zygarde", "castform", "bronzong", "bronzor", "bruxish", "budew", "buizel" ]
venuv62/beit-base-patch16-224-pt22k-ft22k
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # beit-base-patch16-224-pt22k-ft22k This model is a fine-tuned version of [microsoft/beit-base-patch16-224-pt22k-ft22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k-ft22k) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 1.1433 - Accuracy: 0.3333 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 0.67 | 1 | 1.5398 | 0.1667 | | No log | 1.67 | 2 | 1.1394 | 0.5556 | | No log | 2.67 | 3 | 1.1433 | 0.3333 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
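### How to use (sketch)

A minimal inference sketch, assuming the fine-tuned checkpoint is published under the repo id above and that `transformers` with a PyTorch backend is installed:

```python
from transformers import pipeline

# "venuv62/beit-base-patch16-224-pt22k-ft22k" is assumed to be the published repo id.
classifier = pipeline("image-classification", model="venuv62/beit-base-patch16-224-pt22k-ft22k")

# "sample.jpg" is a placeholder path to any input image.
for prediction in classifier("sample.jpg"):
    print(f"{prediction['label']}: {prediction['score']:.3f}")
```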
[ "0_none_absent", "1_mild_just_perceptible", "2_moderate_obvious", "3_severe" ]
venuv62/vit-base-patch16-224
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-patch16-224 This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.2817 - Accuracy: 0.7205 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.8025 | 0.98 | 27 | 0.6852 | 0.6407 | | 0.5648 | 1.98 | 54 | 0.7587 | 0.6971 | | 0.2165 | 2.98 | 81 | 0.6410 | 0.7387 | | 0.0587 | 3.98 | 108 | 1.9350 | 0.5682 | | 0.041 | 4.98 | 135 | 0.9925 | 0.7348 | | 0.013 | 5.98 | 162 | 1.3159 | 0.6980 | | 0.025 | 6.98 | 189 | 1.4855 | 0.7456 | | 0.0243 | 7.98 | 216 | 1.4230 | 0.7489 | | 0.0016 | 8.98 | 243 | 1.2937 | 0.7117 | | 0.0026 | 9.98 | 270 | 1.2817 | 0.7205 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
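### How to use (sketch)

For finer control than the high-level pipeline, a hedged sketch of manual inference with a recent `transformers` release (the repo id and image path are assumptions):

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

repo_id = "venuv62/vit-base-patch16-224"  # assumed published repo id
processor = AutoImageProcessor.from_pretrained(repo_id)
model = AutoModelForImageClassification.from_pretrained(repo_id)

image = Image.open("example.jpg")  # placeholder path
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

probs = logits.softmax(dim=-1)[0]
pred_id = int(probs.argmax())
print(model.config.id2label[pred_id], float(probs[pred_id]))
```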
[ "imposter", "real" ]
AndreIchiro/swinv2-finetuned-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swinv2-finetuned-eurosat This model is a fine-tuned version of [microsoft/swinv2-base-patch4-window16-256](https://huggingface.co/microsoft/swinv2-base-patch4-window16-256) on the imagefolder dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1 ### Framework versions - Transformers 4.24.0 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
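### Reproducing the arguments (sketch)

The hyperparameters above map roughly onto the following `TrainingArguments`; this is an illustrative sketch (the output directory is an assumption, and the Adam betas and epsilon in the list above are the `Trainer` defaults). Note that the total train batch size of 128 is the per-device batch size of 32 multiplied by 4 gradient-accumulation steps:

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="swinv2-finetuned-eurosat",  # assumed output path
    learning_rate=5e-5,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=32,
    gradient_accumulation_steps=4,  # 32 * 4 = 128 effective train batch size
    lr_scheduler_type="linear",
    warmup_ratio=0.1,
    num_train_epochs=1,
    seed=42,
)
```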
[ "annualcrop", "forest", "herbaceousvegetation", "highway", "industrial", "pasture", "permanentcrop", "residential", "river", "sealake" ]
sergiocannata/convnext-tiny-224-finetuned-brs2
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # convnext-tiny-224-finetuned-brs2 This model is a fine-tuned version of [facebook/convnext-tiny-224](https://huggingface.co/facebook/convnext-tiny-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 1.2502 - Accuracy: 0.7925 - F1: 0.7556 - Precision (ppv): 0.8095 - Recall (sensitivity): 0.7083 - Specificity: 0.8621 - Npv: 0.7812 - Auc: 0.7852 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 100 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision (ppv) | Recall (sensitivity) | Specificity | Npv | Auc | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:---------------:|:--------------------:|:-----------:|:------:|:------:| | 0.6884 | 1.89 | 100 | 0.6907 | 0.5472 | 0.4286 | 0.5 | 0.375 | 0.6897 | 0.5714 | 0.5323 | | 0.5868 | 3.77 | 200 | 0.6604 | 0.6415 | 0.4242 | 0.7778 | 0.2917 | 0.9310 | 0.6136 | 0.6114 | | 0.4759 | 5.66 | 300 | 0.6273 | 0.6604 | 0.5 | 0.75 | 0.375 | 0.8966 | 0.6341 | 0.6358 | | 0.3599 | 7.55 | 400 | 0.6520 | 0.6604 | 0.5 | 0.75 | 0.375 | 0.8966 | 0.6341 | 0.6358 | | 0.3248 | 9.43 | 500 | 0.9115 | 0.6415 | 0.4571 | 0.7273 | 0.3333 | 0.8966 | 0.6190 | 0.6149 | | 0.3117 | 11.32 | 600 | 0.8608 | 0.6604 | 0.5263 | 0.7143 | 0.4167 | 0.8621 | 0.6410 | 0.6394 | | 0.4208 | 13.21 | 700 | 0.8774 | 0.6792 | 0.5641 | 0.7333 | 0.4583 | 0.8621 | 0.6579 | 0.6602 | | 0.5267 | 15.09 | 800 | 1.0131 | 0.6792 | 0.5405 | 0.7692 | 0.4167 | 0.8966 | 0.65 | 0.6566 | | 0.234 | 16.98 | 900 | 1.1498 | 0.6981 | 0.5556 | 0.8333 | 0.4167 | 0.9310 | 0.6585 | 0.6739 | | 0.7581 | 18.87 | 1000 | 1.0952 | 0.7170 | 0.6154 | 0.8 | 0.5 | 0.8966 | 0.6842 | 0.6983 | | 0.1689 | 20.75 | 1100 | 1.1653 | 0.6981 | 0.5789 | 0.7857 | 0.4583 | 0.8966 | 0.6667 | 0.6774 | | 0.0765 | 22.64 | 1200 | 1.1245 | 0.7170 | 0.6667 | 0.7143 | 0.625 | 0.7931 | 0.7188 | 0.7091 | | 0.6287 | 24.53 | 1300 | 1.2222 | 0.6981 | 0.6 | 0.75 | 0.5 | 0.8621 | 0.6757 | 0.6810 | | 0.0527 | 26.42 | 1400 | 1.2350 | 0.7358 | 0.6818 | 0.75 | 0.625 | 0.8276 | 0.7273 | 0.7263 | | 0.3622 | 28.3 | 1500 | 1.1022 | 0.7547 | 0.6667 | 0.8667 | 0.5417 | 0.9310 | 0.7105 | 0.7364 | | 0.3227 | 30.19 | 1600 | 1.1541 | 0.7170 | 0.6154 | 0.8 | 0.5 | 0.8966 | 0.6842 | 0.6983 | | 0.3849 | 32.08 | 1700 | 1.2818 | 0.7170 | 0.6154 | 0.8 | 0.5 | 0.8966 | 0.6842 | 0.6983 | | 0.4528 | 33.96 | 1800 | 1.3213 | 0.6981 | 0.5789 | 0.7857 | 0.4583 | 0.8966 | 0.6667 | 0.6774 | | 0.1824 | 35.85 | 1900 | 1.3171 | 0.7170 | 0.6512 | 0.7368 | 0.5833 | 0.8276 | 0.7059 | 0.7055 | | 0.0367 | 37.74 | 2000 | 1.4484 | 0.7170 | 0.6154 | 0.8 | 0.5 | 0.8966 | 0.6842 | 0.6983 | | 0.07 | 39.62 | 2100 | 1.3521 | 0.7547 | 0.6977 | 0.7895 | 0.625 | 0.8621 | 0.7353 | 0.7435 | | 0.0696 | 41.51 | 2200 | 1.2636 | 0.7358 | 0.65 | 0.8125 | 0.5417 | 0.8966 | 0.7027 | 0.7191 | | 0.1554 | 43.4 | 2300 | 1.2225 | 0.7358 | 0.6667 | 
0.7778 | 0.5833 | 0.8621 | 0.7143 | 0.7227 | | 0.2346 | 45.28 | 2400 | 1.2627 | 0.7547 | 0.6829 | 0.8235 | 0.5833 | 0.8966 | 0.7222 | 0.7399 | | 0.097 | 47.17 | 2500 | 1.4892 | 0.7170 | 0.6667 | 0.7143 | 0.625 | 0.7931 | 0.7188 | 0.7091 | | 0.2494 | 49.06 | 2600 | 1.5282 | 0.7170 | 0.6512 | 0.7368 | 0.5833 | 0.8276 | 0.7059 | 0.7055 | | 0.0734 | 50.94 | 2700 | 1.3989 | 0.7170 | 0.6341 | 0.7647 | 0.5417 | 0.8621 | 0.6944 | 0.7019 | | 0.1077 | 52.83 | 2800 | 1.5155 | 0.6792 | 0.5641 | 0.7333 | 0.4583 | 0.8621 | 0.6579 | 0.6602 | | 0.2456 | 54.72 | 2900 | 1.4400 | 0.7170 | 0.6512 | 0.7368 | 0.5833 | 0.8276 | 0.7059 | 0.7055 | | 0.0823 | 56.6 | 3000 | 1.4511 | 0.7358 | 0.65 | 0.8125 | 0.5417 | 0.8966 | 0.7027 | 0.7191 | | 0.0471 | 58.49 | 3100 | 1.5114 | 0.7547 | 0.6829 | 0.8235 | 0.5833 | 0.8966 | 0.7222 | 0.7399 | | 0.0144 | 60.38 | 3200 | 1.4412 | 0.7925 | 0.7317 | 0.8824 | 0.625 | 0.9310 | 0.75 | 0.7780 | | 0.1235 | 62.26 | 3300 | 1.2029 | 0.7547 | 0.6977 | 0.7895 | 0.625 | 0.8621 | 0.7353 | 0.7435 | | 0.0121 | 64.15 | 3400 | 1.4925 | 0.7358 | 0.6667 | 0.7778 | 0.5833 | 0.8621 | 0.7143 | 0.7227 | | 0.2126 | 66.04 | 3500 | 1.3614 | 0.7547 | 0.6667 | 0.8667 | 0.5417 | 0.9310 | 0.7105 | 0.7364 | | 0.0496 | 67.92 | 3600 | 1.2960 | 0.7736 | 0.7143 | 0.8333 | 0.625 | 0.8966 | 0.7429 | 0.7608 | | 0.1145 | 69.81 | 3700 | 1.3763 | 0.7547 | 0.6829 | 0.8235 | 0.5833 | 0.8966 | 0.7222 | 0.7399 | | 0.1272 | 71.7 | 3800 | 1.6328 | 0.7170 | 0.5946 | 0.8462 | 0.4583 | 0.9310 | 0.675 | 0.6947 | | 0.0007 | 73.58 | 3900 | 1.5622 | 0.7547 | 0.6977 | 0.7895 | 0.625 | 0.8621 | 0.7353 | 0.7435 | | 0.0101 | 75.47 | 4000 | 1.1811 | 0.7925 | 0.7442 | 0.8421 | 0.6667 | 0.8966 | 0.7647 | 0.7816 | | 0.0002 | 77.36 | 4100 | 1.8533 | 0.6981 | 0.5789 | 0.7857 | 0.4583 | 0.8966 | 0.6667 | 0.6774 | | 0.0423 | 79.25 | 4200 | 1.2510 | 0.7547 | 0.6977 | 0.7895 | 0.625 | 0.8621 | 0.7353 | 0.7435 | | 0.0036 | 81.13 | 4300 | 1.3443 | 0.7547 | 0.6829 | 0.8235 | 0.5833 | 0.8966 | 0.7222 | 0.7399 | | 0.0432 | 83.02 | 4400 | 1.2864 | 0.7736 | 0.7273 | 0.8 | 0.6667 | 0.8621 | 0.7576 | 0.7644 | | 0.0021 | 84.91 | 4500 | 0.8999 | 0.7925 | 0.7755 | 0.76 | 0.7917 | 0.7931 | 0.8214 | 0.7924 | | 0.0002 | 86.79 | 4600 | 1.3634 | 0.7925 | 0.7442 | 0.8421 | 0.6667 | 0.8966 | 0.7647 | 0.7816 | | 0.0044 | 88.68 | 4700 | 1.7830 | 0.7358 | 0.65 | 0.8125 | 0.5417 | 0.8966 | 0.7027 | 0.7191 | | 0.0003 | 90.57 | 4800 | 1.2640 | 0.7736 | 0.7273 | 0.8 | 0.6667 | 0.8621 | 0.7576 | 0.7644 | | 0.0253 | 92.45 | 4900 | 1.2649 | 0.7925 | 0.7442 | 0.8421 | 0.6667 | 0.8966 | 0.7647 | 0.7816 | | 0.0278 | 94.34 | 5000 | 1.7485 | 0.7170 | 0.6512 | 0.7368 | 0.5833 | 0.8276 | 0.7059 | 0.7055 | | 0.1608 | 96.23 | 5100 | 1.2641 | 0.8113 | 0.7727 | 0.85 | 0.7083 | 0.8966 | 0.7879 | 0.8024 | | 0.0017 | 98.11 | 5200 | 1.6380 | 0.7170 | 0.6667 | 0.7143 | 0.625 | 0.7931 | 0.7188 | 0.7091 | | 0.001 | 100.0 | 5300 | 1.2502 | 0.7925 | 0.7556 | 0.8095 | 0.7083 | 0.8621 | 0.7812 | 0.7852 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
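### Metric definitions (sketch)

The reported columns follow their standard binary-classification definitions; a minimal sketch of how they can be computed with scikit-learn (the arrays below are placeholders, not the model's actual predictions):

```python
import numpy as np
from sklearn.metrics import (accuracy_score, confusion_matrix, f1_score,
                             precision_score, recall_score, roc_auc_score)

# Placeholder arrays; in practice these come from the evaluation predictions.
y_true = np.array([0, 1, 1, 0, 1, 0])
y_pred = np.array([0, 1, 0, 0, 1, 1])
y_score = np.array([0.2, 0.9, 0.4, 0.1, 0.8, 0.6])  # probability of class 1

tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
print("accuracy:", accuracy_score(y_true, y_pred))
print("f1:", f1_score(y_true, y_pred))
print("precision (ppv):", precision_score(y_true, y_pred))
print("recall (sensitivity):", recall_score(y_true, y_pred))
print("specificity:", tn / (tn + fp))
print("npv:", tn / (tn + fn))
print("auc:", roc_auc_score(y_true, y_score))
```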
[ "event", "no_event" ]
sergiocannata/cvt-21-finetuned-brs2
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # cvt-21-finetuned-brs2 This model is a fine-tuned version of [microsoft/cvt-21](https://huggingface.co/microsoft/cvt-21) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.6947 - Accuracy: 0.6604 - F1: 0.6087 - Precision (ppv): 0.5385 - Recall (sensitivity): 0.7 - Specificity: 0.6364 - Npv: 0.7778 - Auc: 0.6682 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 4 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 100 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision (ppv) | Recall (sensitivity) | Specificity | Npv | Auc | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:---------------:|:--------------------:|:-----------:|:------:|:------:| | 0.8177 | 1.89 | 100 | 0.7113 | 0.5283 | 0.5098 | 0.4194 | 0.65 | 0.4545 | 0.6818 | 0.5523 | | 0.736 | 3.77 | 200 | 0.7178 | 0.5283 | 0.3902 | 0.3810 | 0.4 | 0.6061 | 0.625 | 0.5030 | | 0.5978 | 5.66 | 300 | 0.6889 | 0.6038 | 0.5532 | 0.4815 | 0.65 | 0.5758 | 0.7308 | 0.6129 | | 0.5576 | 7.55 | 400 | 0.7349 | 0.4717 | 0.5484 | 0.4048 | 0.85 | 0.2424 | 0.7273 | 0.5462 | | 0.5219 | 9.43 | 500 | 0.6522 | 0.6038 | 0.4 | 0.4667 | 0.35 | 0.7576 | 0.6579 | 0.5538 | | 0.5326 | 11.32 | 600 | 0.6665 | 0.6226 | 0.5238 | 0.5 | 0.55 | 0.6667 | 0.7097 | 0.6083 | | 0.4381 | 13.21 | 700 | 0.7685 | 0.4717 | 0.5333 | 0.4 | 0.8 | 0.2727 | 0.6923 | 0.5364 | | 0.5598 | 15.09 | 800 | 0.7212 | 0.5283 | 0.1935 | 0.2727 | 0.15 | 0.7576 | 0.5952 | 0.4538 | | 0.6887 | 16.98 | 900 | 0.6985 | 0.6604 | 0.64 | 0.5333 | 0.8 | 0.5758 | 0.8261 | 0.6879 | | 0.7594 | 18.87 | 1000 | 0.7040 | 0.5472 | 0.4286 | 0.4091 | 0.45 | 0.6061 | 0.6452 | 0.5280 | | 0.2177 | 20.75 | 1100 | 0.8056 | 0.4528 | 0.5397 | 0.3953 | 0.85 | 0.2121 | 0.7 | 0.5311 | | 0.4893 | 22.64 | 1200 | 0.8821 | 0.3396 | 0.3860 | 0.2973 | 0.55 | 0.2121 | 0.4375 | 0.3811 | | 0.5994 | 24.53 | 1300 | 0.8059 | 0.5660 | 0.5660 | 0.4545 | 0.75 | 0.4545 | 0.75 | 0.6023 | | 0.5179 | 26.42 | 1400 | 0.6750 | 0.6038 | 0.4615 | 0.4737 | 0.45 | 0.6970 | 0.6765 | 0.5735 | | 0.198 | 28.3 | 1500 | 0.7448 | 0.3962 | 0.3333 | 0.2857 | 0.4 | 0.3939 | 0.52 | 0.3970 | | 0.6536 | 30.19 | 1600 | 0.7555 | 0.5094 | 0.4583 | 0.3929 | 0.55 | 0.4848 | 0.64 | 0.5174 | | 0.7558 | 32.08 | 1700 | 0.6664 | 0.5849 | 0.4762 | 0.4545 | 0.5 | 0.6364 | 0.6774 | 0.5682 | | 0.4915 | 33.96 | 1800 | 0.9213 | 0.3962 | 0.5152 | 0.3696 | 0.85 | 0.1212 | 0.5714 | 0.4856 | | 0.3661 | 35.85 | 1900 | 0.9202 | 0.4528 | 0.4912 | 0.3784 | 0.7 | 0.3030 | 0.625 | 0.5015 | | 0.4838 | 37.74 | 2000 | 0.9297 | 0.4528 | 0.5085 | 0.3846 | 0.75 | 0.2727 | 0.6429 | 0.5114 | | 0.8461 | 39.62 | 2100 | 0.9464 | 0.4717 | 0.5758 | 0.4130 | 0.95 | 0.1818 | 0.8571 | 0.5659 | | 0.6937 | 41.51 | 2200 | 0.7129 | 0.5094 | 0.48 | 0.4 | 0.6 | 0.4545 | 0.6522 | 0.5273 | | 0.6302 | 43.4 | 2300 | 0.6866 | 0.5849 | 0.6071 | 0.4722 | 0.85 | 0.4242 | 0.8235 | 0.6371 | | 0.0793 | 
45.28 | 2400 | 0.7791 | 0.5094 | 0.5517 | 0.4211 | 0.8 | 0.3333 | 0.7333 | 0.5667 | | 0.464 | 47.17 | 2500 | 0.8116 | 0.4340 | 0.4444 | 0.3529 | 0.6 | 0.3333 | 0.5789 | 0.4667 | | 0.6131 | 49.06 | 2600 | 0.5970 | 0.6226 | 0.5455 | 0.5 | 0.6 | 0.6364 | 0.7241 | 0.6182 | | 0.6937 | 50.94 | 2700 | 0.8201 | 0.4340 | 0.4 | 0.3333 | 0.5 | 0.3939 | 0.5652 | 0.4470 | | 0.6552 | 52.83 | 2800 | 0.7168 | 0.5660 | 0.5306 | 0.4483 | 0.65 | 0.5152 | 0.7083 | 0.5826 | | 0.7749 | 54.72 | 2900 | 0.6875 | 0.5849 | 0.5217 | 0.4615 | 0.6 | 0.5758 | 0.7037 | 0.5879 | | 0.9482 | 56.6 | 3000 | 0.6392 | 0.6226 | 0.6296 | 0.5 | 0.85 | 0.4848 | 0.8421 | 0.6674 | | 0.2467 | 58.49 | 3100 | 0.6281 | 0.6038 | 0.5333 | 0.48 | 0.6 | 0.6061 | 0.7143 | 0.6030 | | 0.2903 | 60.38 | 3200 | 0.7383 | 0.5472 | 0.5556 | 0.4412 | 0.75 | 0.4242 | 0.7368 | 0.5871 | | 0.5859 | 62.26 | 3300 | 0.7191 | 0.6226 | 0.5652 | 0.5 | 0.65 | 0.6061 | 0.7407 | 0.6280 | | 0.3815 | 64.15 | 3400 | 0.7469 | 0.5283 | 0.4444 | 0.4 | 0.5 | 0.5455 | 0.6429 | 0.5227 | | 0.531 | 66.04 | 3500 | 0.7566 | 0.6226 | 0.5652 | 0.5 | 0.65 | 0.6061 | 0.7407 | 0.6280 | | 0.3892 | 67.92 | 3600 | 0.8168 | 0.5660 | 0.5490 | 0.4516 | 0.7 | 0.4848 | 0.7273 | 0.5924 | | 0.6487 | 69.81 | 3700 | 0.9077 | 0.4340 | 0.4643 | 0.3611 | 0.65 | 0.3030 | 0.5882 | 0.4765 | | 0.5525 | 71.7 | 3800 | 0.6961 | 0.6038 | 0.5116 | 0.4783 | 0.55 | 0.6364 | 0.7 | 0.5932 | | 0.3137 | 73.58 | 3900 | 1.0817 | 0.3774 | 0.4590 | 0.3415 | 0.7 | 0.1818 | 0.5 | 0.4409 | | 0.3526 | 75.47 | 4000 | 0.7684 | 0.5472 | 0.5862 | 0.4474 | 0.85 | 0.3636 | 0.8 | 0.6068 | | 0.5938 | 77.36 | 4100 | 0.8786 | 0.4340 | 0.4828 | 0.3684 | 0.7 | 0.2727 | 0.6 | 0.4864 | | 0.2431 | 79.25 | 4200 | 0.8925 | 0.4151 | 0.4746 | 0.3590 | 0.7 | 0.2424 | 0.5714 | 0.4712 | | 0.1021 | 81.13 | 4300 | 1.0740 | 0.4528 | 0.4727 | 0.3714 | 0.65 | 0.3333 | 0.6111 | 0.4917 | | 0.3429 | 83.02 | 4400 | 0.7723 | 0.4906 | 0.5091 | 0.4 | 0.7 | 0.3636 | 0.6667 | 0.5318 | | 0.3836 | 84.91 | 4500 | 0.7247 | 0.5472 | 0.5556 | 0.4412 | 0.75 | 0.4242 | 0.7368 | 0.5871 | | 0.4099 | 86.79 | 4600 | 0.8508 | 0.4340 | 0.4828 | 0.3684 | 0.7 | 0.2727 | 0.6 | 0.4864 | | 0.8264 | 88.68 | 4700 | 0.7682 | 0.5849 | 0.5769 | 0.4688 | 0.75 | 0.4848 | 0.7619 | 0.6174 | | 0.1928 | 90.57 | 4800 | 0.8738 | 0.4906 | 0.5574 | 0.4146 | 0.85 | 0.2727 | 0.75 | 0.5614 | | 0.3422 | 92.45 | 4900 | 0.8810 | 0.5660 | 0.5965 | 0.4595 | 0.85 | 0.3939 | 0.8125 | 0.6220 | | 0.5524 | 94.34 | 5000 | 1.0801 | 0.3774 | 0.4923 | 0.3556 | 0.8 | 0.1212 | 0.5 | 0.4606 | | 0.464 | 96.23 | 5100 | 0.9417 | 0.5283 | 0.5902 | 0.4390 | 0.9 | 0.3030 | 0.8333 | 0.6015 | | 0.7182 | 98.11 | 5200 | 1.0335 | 0.4151 | 0.4746 | 0.3590 | 0.7 | 0.2424 | 0.5714 | 0.4712 | | 0.604 | 100.0 | 5300 | 0.6947 | 0.6604 | 0.6087 | 0.5385 | 0.7 | 0.6364 | 0.7778 | 0.6682 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
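### Setting up the base model (sketch)

A sketch of how the base checkpoint can be prepared for this two-label task before fine-tuning; the label names are taken from the list below, while everything else is an assumption rather than the author's exact setup:

```python
from transformers import AutoModelForImageClassification

labels = ["event", "no_event"]
model = AutoModelForImageClassification.from_pretrained(
    "microsoft/cvt-21",
    num_labels=len(labels),
    id2label={i: label for i, label in enumerate(labels)},
    label2id={label: i for i, label in enumerate(labels)},
    ignore_mismatched_sizes=True,  # swap the ImageNet head for a 2-class head
)
```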
[ "event", "no_event" ]
santiagoahl/vit_model
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit_model This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Framework versions - Transformers 4.24.0 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
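### Preparing the data (sketch)

A minimal sketch of loading and preprocessing the beans dataset for this kind of fine-tuning, assuming the `datasets` library is installed (the exact preprocessing used for this model is not documented, so this is illustrative):

```python
from datasets import load_dataset
from transformers import AutoImageProcessor

dataset = load_dataset("beans")  # train / validation / test splits
processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")

def preprocess(batch):
    # Resize and normalize the PIL images into the pixel_values ViT expects.
    batch["pixel_values"] = processor(images=batch["image"], return_tensors="pt")["pixel_values"]
    return batch

# Applied on the fly; the pixel_values and labels columns can then feed a Trainer.
prepared = dataset.with_transform(preprocess)
```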
[ "angular_leaf_spot", "bean_rust", "healthy" ]
platzi/platzi-beans-beit-model-eduardo-ag
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # platzi-beans-beit-model-eduardo-ag This model is a fine-tuned version of [microsoft/beit-base-patch16-224-pt22k-ft22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k-ft22k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.3782 - Accuracy: 0.8496 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.6894 | 3.85 | 500 | 0.3782 | 0.8496 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
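### Checking the model (sketch)

A hedged sketch of re-scoring the fine-tuned checkpoint on the beans validation split (the repo id is assumed to match this card; the loop is deliberately simple rather than efficient):

```python
import torch
from datasets import load_dataset
from transformers import AutoImageProcessor, AutoModelForImageClassification

repo_id = "platzi/platzi-beans-beit-model-eduardo-ag"  # assumed repo id
processor = AutoImageProcessor.from_pretrained(repo_id)
model = AutoModelForImageClassification.from_pretrained(repo_id).eval()

validation = load_dataset("beans", split="validation")
correct = 0
for example in validation:
    inputs = processor(images=example["image"], return_tensors="pt")
    with torch.no_grad():
        pred = int(model(**inputs).logits.argmax(-1))
    correct += int(pred == example["labels"])
print(f"validation accuracy: {correct / len(validation):.4f}")
```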
[ "angular_leaf_spot", "bean_rust", "healthy" ]
thaonguyen274/resnet-50-finetuned-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # resnet-50-finetuned-eurosat This model is a fine-tuned version of [microsoft/resnet-50](https://huggingface.co/microsoft/resnet-50) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.9095 - Accuracy: 0.8240 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.78 | 0.96 | 17 | 1.7432 | 0.4321 | | 1.7105 | 1.96 | 34 | 1.6596 | 0.6307 | | 1.6045 | 2.96 | 51 | 1.5369 | 0.6758 | | 1.6526 | 3.96 | 68 | 1.4111 | 0.7139 | | 1.4018 | 4.96 | 85 | 1.2686 | 0.7602 | | 1.2812 | 5.96 | 102 | 1.1433 | 0.7714 | | 1.3282 | 6.96 | 119 | 1.0643 | 0.7910 | | 1.1246 | 7.96 | 136 | 0.9794 | 0.8133 | | 1.0731 | 8.96 | 153 | 0.9279 | 0.8087 | | 1.0531 | 9.96 | 170 | 0.9095 | 0.8240 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
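### Learning-rate schedule (sketch)

With `lr_scheduler_type: linear` and `lr_scheduler_warmup_ratio: 0.1`, the learning rate ramps up over the first 10% of optimization steps (17 of the 170 steps in the table above) and then decays linearly to zero. A sketch with the underlying helper; the dummy optimizer exists only to drive the schedule:

```python
import torch
from transformers import get_linear_schedule_with_warmup

total_steps = 170                      # from the training results table above
warmup_steps = int(0.1 * total_steps)  # warmup_ratio 0.1 -> 17 steps

# Dummy parameter/optimizer, purely to illustrate the schedule shape.
optimizer = torch.optim.Adam([torch.nn.Parameter(torch.zeros(1))],
                             lr=1e-4, betas=(0.9, 0.999), eps=1e-8)
scheduler = get_linear_schedule_with_warmup(optimizer, warmup_steps, total_steps)

for step in range(total_steps):
    optimizer.step()
    scheduler.step()
    if step in (0, warmup_steps - 1, total_steps - 1):
        print(step, scheduler.get_last_lr()[0])
```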
[ "buildings", "forest", "glacier", "mountain", "sea", "street" ]
GHonem/swin-finetuned-food101
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-finetuned-food101 This model is a fine-tuned version of [microsoft/swin-base-patch4-window7-224](https://huggingface.co/microsoft/swin-base-patch4-window7-224) on the food101 dataset. It achieves the following results on the evaluation set: - Loss: 0.3254 - Accuracy: 0.9081 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.5074 | 1.0 | 1183 | 0.3254 | 0.9081 | ### Framework versions - Transformers 4.25.0.dev0 - Pytorch 1.12.1+cu113 - Datasets 2.6.2.dev0 - Tokenizers 0.13.1
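### How to use (sketch)

A short inference sketch returning the five most likely dishes, assuming the checkpoint is published under the repo id above (the image path is a placeholder):

```python
from transformers import pipeline

# "GHonem/swin-finetuned-food101" is assumed to be the published repo id.
classifier = pipeline("image-classification", model="GHonem/swin-finetuned-food101")

for prediction in classifier("dish.jpg", top_k=5):  # any food photo works here
    print(f"{prediction['label']:<25} {prediction['score']:.3f}")
```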
[ "apple_pie", "baby_back_ribs", "bruschetta", "waffles", "caesar_salad", "cannoli", "caprese_salad", "carrot_cake", "ceviche", "cheesecake", "cheese_plate", "chicken_curry", "chicken_quesadilla", "baklava", "chicken_wings", "chocolate_cake", "chocolate_mousse", "churros", "clam_chowder", "club_sandwich", "crab_cakes", "creme_brulee", "croque_madame", "cup_cakes", "beef_carpaccio", "deviled_eggs", "donuts", "dumplings", "edamame", "eggs_benedict", "escargots", "falafel", "filet_mignon", "fish_and_chips", "foie_gras", "beef_tartare", "french_fries", "french_onion_soup", "french_toast", "fried_calamari", "fried_rice", "frozen_yogurt", "garlic_bread", "gnocchi", "greek_salad", "grilled_cheese_sandwich", "beet_salad", "grilled_salmon", "guacamole", "gyoza", "hamburger", "hot_and_sour_soup", "hot_dog", "huevos_rancheros", "hummus", "ice_cream", "lasagna", "beignets", "lobster_bisque", "lobster_roll_sandwich", "macaroni_and_cheese", "macarons", "miso_soup", "mussels", "nachos", "omelette", "onion_rings", "oysters", "bibimbap", "pad_thai", "paella", "pancakes", "panna_cotta", "peking_duck", "pho", "pizza", "pork_chop", "poutine", "prime_rib", "bread_pudding", "pulled_pork_sandwich", "ramen", "ravioli", "red_velvet_cake", "risotto", "samosa", "sashimi", "scallops", "seaweed_salad", "shrimp_and_grits", "breakfast_burrito", "spaghetti_bolognese", "spaghetti_carbonara", "spring_rolls", "steak", "strawberry_shortcake", "sushi", "tacos", "takoyaki", "tiramisu", "tuna_tartare" ]