model_id
stringlengths
7
105
model_card
stringlengths
1
130k
model_labels
listlengths
2
80k
polejowska/convnext-tiny-224-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # convnext-tiny-224-eurosat This model is a fine-tuned version of [facebook/convnext-tiny-224](https://huggingface.co/facebook/convnext-tiny-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.3153 - Accuracy: 0.9537 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.863 | 0.98 | 33 | 1.5775 | 0.7619 | | 1.039 | 1.98 | 66 | 0.8142 | 0.9008 | | 0.5825 | 2.98 | 99 | 0.4442 | 0.9339 | | 0.3228 | 3.98 | 132 | 0.3153 | 0.9537 | | 0.2641 | 4.98 | 165 | 0.2868 | 0.9524 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
[ "annualcrop", "forest", "herbaceousvegetation", "highway", "industrial", "pasture", "permanentcrop", "residential", "river", "sealake" ]
Neruoy/swin-finetuned-food101
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-finetuned-food101 This model is a fine-tuned version of [microsoft/swin-base-patch4-window7-224](https://huggingface.co/microsoft/swin-base-patch4-window7-224) on the food101 dataset. It achieves the following results on the evaluation set: - Loss: 0.4401 - Accuracy: 0.9220 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 0.0579 | 1.0 | 1183 | 0.4190 | 0.9102 | | 0.0129 | 2.0 | 2366 | 0.4179 | 0.9155 | | 0.0076 | 3.0 | 3549 | 0.4219 | 0.9198 | | 0.0197 | 4.0 | 4732 | 0.4487 | 0.9160 | | 0.0104 | 5.0 | 5915 | 0.4414 | 0.9210 | | 0.0007 | 6.0 | 7098 | 0.4401 | 0.9220 | | 0.0021 | 7.0 | 8281 | 0.4401 | 0.9220 | | 0.0015 | 8.0 | 9464 | 0.4401 | 0.9220 | | 0.0056 | 9.0 | 10647 | 0.4401 | 0.9220 | | 0.0019 | 10.0 | 11830 | 0.4401 | 0.9220 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
[ "apple_pie", "baby_back_ribs", "bruschetta", "waffles", "caesar_salad", "cannoli", "caprese_salad", "carrot_cake", "ceviche", "cheesecake", "cheese_plate", "chicken_curry", "chicken_quesadilla", "baklava", "chicken_wings", "chocolate_cake", "chocolate_mousse", "churros", "clam_chowder", "club_sandwich", "crab_cakes", "creme_brulee", "croque_madame", "cup_cakes", "beef_carpaccio", "deviled_eggs", "donuts", "dumplings", "edamame", "eggs_benedict", "escargots", "falafel", "filet_mignon", "fish_and_chips", "foie_gras", "beef_tartare", "french_fries", "french_onion_soup", "french_toast", "fried_calamari", "fried_rice", "frozen_yogurt", "garlic_bread", "gnocchi", "greek_salad", "grilled_cheese_sandwich", "beet_salad", "grilled_salmon", "guacamole", "gyoza", "hamburger", "hot_and_sour_soup", "hot_dog", "huevos_rancheros", "hummus", "ice_cream", "lasagna", "beignets", "lobster_bisque", "lobster_roll_sandwich", "macaroni_and_cheese", "macarons", "miso_soup", "mussels", "nachos", "omelette", "onion_rings", "oysters", "bibimbap", "pad_thai", "paella", "pancakes", "panna_cotta", "peking_duck", "pho", "pizza", "pork_chop", "poutine", "prime_rib", "bread_pudding", "pulled_pork_sandwich", "ramen", "ravioli", "red_velvet_cake", "risotto", "samosa", "sashimi", "scallops", "seaweed_salad", "shrimp_and_grits", "breakfast_burrito", "spaghetti_bolognese", "spaghetti_carbonara", "spring_rolls", "steak", "strawberry_shortcake", "sushi", "tacos", "takoyaki", "tiramisu", "tuna_tartare" ]
Squiggles112/autotrain-hamantest-2444675858
# Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 2444675858 - CO2 Emissions (in grams): 0.3690 ## Validation Metrics - Loss: 0.634 - Accuracy: 0.800 - Precision: 0.000 - Recall: 0.000 - AUC: 0.000 - F1: 0.000
[ "haman_jpeg", "haman_png" ]
taraqur/blossom-vit
<!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # blossom-vit This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on an unknown dataset. It achieves the following results on the evaluation set: ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 3e-05, 'decay_steps': 345, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: float32 ### Training results ### Framework versions - Transformers 4.25.1 - TensorFlow 2.10.0 - Datasets 2.7.1 - Tokenizers 0.13.2
[ "daisy", "rose", "tulip", "dandelion", "sunflower" ]
Neruoy/swin-finetuned-food101-e3
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-finetuned-food101-e3 This model is a fine-tuned version of [microsoft/swin-base-patch4-window7-224](https://huggingface.co/microsoft/swin-base-patch4-window7-224) on the food101 dataset. It achieves the following results on the evaluation set: - Loss: 0.2714 - Accuracy: 0.9227 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.5565 | 1.0 | 1183 | 0.3939 | 0.8856 | | 0.3466 | 2.0 | 2366 | 0.2936 | 0.9156 | | 0.1172 | 3.0 | 3549 | 0.2714 | 0.9227 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
[ "apple_pie", "baby_back_ribs", "bruschetta", "waffles", "caesar_salad", "cannoli", "caprese_salad", "carrot_cake", "ceviche", "cheesecake", "cheese_plate", "chicken_curry", "chicken_quesadilla", "baklava", "chicken_wings", "chocolate_cake", "chocolate_mousse", "churros", "clam_chowder", "club_sandwich", "crab_cakes", "creme_brulee", "croque_madame", "cup_cakes", "beef_carpaccio", "deviled_eggs", "donuts", "dumplings", "edamame", "eggs_benedict", "escargots", "falafel", "filet_mignon", "fish_and_chips", "foie_gras", "beef_tartare", "french_fries", "french_onion_soup", "french_toast", "fried_calamari", "fried_rice", "frozen_yogurt", "garlic_bread", "gnocchi", "greek_salad", "grilled_cheese_sandwich", "beet_salad", "grilled_salmon", "guacamole", "gyoza", "hamburger", "hot_and_sour_soup", "hot_dog", "huevos_rancheros", "hummus", "ice_cream", "lasagna", "beignets", "lobster_bisque", "lobster_roll_sandwich", "macaroni_and_cheese", "macarons", "miso_soup", "mussels", "nachos", "omelette", "onion_rings", "oysters", "bibimbap", "pad_thai", "paella", "pancakes", "panna_cotta", "peking_duck", "pho", "pizza", "pork_chop", "poutine", "prime_rib", "bread_pudding", "pulled_pork_sandwich", "ramen", "ravioli", "red_velvet_cake", "risotto", "samosa", "sashimi", "scallops", "seaweed_salad", "shrimp_and_grits", "breakfast_burrito", "spaghetti_bolognese", "spaghetti_carbonara", "spring_rolls", "steak", "strawberry_shortcake", "sushi", "tacos", "takoyaki", "tiramisu", "tuna_tartare" ]
polejowska/swin-tiny-patch4-window7-224-lcbsi-wbc
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-lcbsi-wbc This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0307 - Accuracy: 0.9933 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.3668 | 0.98 | 27 | 0.6977 | 0.76 | | 0.217 | 1.98 | 54 | 0.0992 | 0.972 | | 0.102 | 2.98 | 81 | 0.0573 | 0.9853 | | 0.0762 | 3.98 | 108 | 0.1003 | 0.976 | | 0.0456 | 4.98 | 135 | 0.0307 | 0.9933 | | 0.0219 | 5.98 | 162 | 0.0497 | 0.9907 | | 0.0106 | 6.98 | 189 | 0.0568 | 0.9867 | | 0.0112 | 7.98 | 216 | 0.0532 | 0.9907 | | 0.0067 | 8.98 | 243 | 0.0528 | 0.9907 | | 0.008 | 9.98 | 270 | 0.0482 | 0.992 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
[ "basophil", "eosinophil", "lymphocyte", "monocyte", "neutrophil" ]
polejowska/vit-base-patch16-224-in21k-lcbsi
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-patch16-224-in21k-lcbsi This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0690 - Accuracy: 0.9853 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.5933 | 0.98 | 27 | 0.1754 | 0.9587 | | 0.2672 | 1.98 | 54 | 0.1286 | 0.964 | | 0.153 | 2.98 | 81 | 0.1375 | 0.9627 | | 0.0838 | 3.98 | 108 | 0.1100 | 0.9733 | | 0.0457 | 4.98 | 135 | 0.1678 | 0.952 | | 0.0366 | 5.98 | 162 | 0.0744 | 0.9813 | | 0.0108 | 6.98 | 189 | 0.0834 | 0.9827 | | 0.0075 | 7.98 | 216 | 0.0690 | 0.9853 | | 0.0051 | 8.98 | 243 | 0.0739 | 0.984 | | 0.0028 | 9.98 | 270 | 0.0731 | 0.984 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
[ "basophil", "eosinophil", "lymphocyte", "monocyte", "neutrophil" ]
polejowska/vit-base-xray-pneumonia-lcbsi
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-xray-pneumonia-lcbsi This model is a fine-tuned version of [nickmuchi/vit-base-xray-pneumonia](https://huggingface.co/nickmuchi/vit-base-xray-pneumonia) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.3775 - Accuracy: 0.9773 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.5887 | 0.98 | 27 | 1.4949 | 0.4413 | | 1.3065 | 1.98 | 54 | 1.1940 | 0.888 | | 0.9621 | 2.98 | 81 | 0.9100 | 0.9493 | | 0.792 | 3.98 | 108 | 0.7025 | 0.9653 | | 0.5976 | 4.98 | 135 | 0.5612 | 0.972 | | 0.4804 | 5.98 | 162 | 0.4705 | 0.9747 | | 0.4194 | 6.98 | 189 | 0.4131 | 0.976 | | 0.373 | 7.98 | 216 | 0.3775 | 0.9773 | | 0.3402 | 8.98 | 243 | 0.3616 | 0.976 | | 0.3308 | 9.98 | 270 | 0.3538 | 0.976 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
[ "basophil", "eosinophil", "lymphocyte", "monocyte", "neutrophil" ]
micole66/autotrain-feet-typelok-2473576411
# Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 2473576411 - CO2 Emissions (in grams): 0.5535 ## Validation Metrics - Loss: 0.279 - Accuracy: 1.000 - Precision: 1.000 - Recall: 1.000 - AUC: 1.000 - F1: 1.000
[ "other", "pachyderm feet" ]
Efimov6886/row4_accu100
# Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 2477076724 - CO2 Emissions (in grams): 0.0039 ## Validation Metrics - Loss: 0.021 - Accuracy: 0.990 - Macro F1: 0.990 - Micro F1: 0.990 - Weighted F1: 0.990 - Macro Precision: 0.990 - Micro Precision: 0.990 - Weighted Precision: 0.990 - Macro Recall: 0.990 - Micro Recall: 0.990 - Weighted Recall: 0.990
[ "animals", "dance", "food", "sport", "tech" ]
Efimov6886/row4_98
# Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 2477076728 - CO2 Emissions (in grams): 1.8937 ## Validation Metrics - Loss: 0.047 - Accuracy: 0.980 - Macro F1: 0.980 - Micro F1: 0.980 - Weighted F1: 0.980 - Macro Precision: 0.980 - Micro Precision: 0.980 - Weighted Precision: 0.980 - Macro Recall: 0.980 - Micro Recall: 0.980 - Weighted Recall: 0.980
[ "animals", "dance", "food", "sport", "tech" ]
Efimov6886/row5_100
# Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 2488976797 - CO2 Emissions (in grams): 2.6475 ## Validation Metrics - Loss: 0.000 - Accuracy: 1.000 - Macro F1: 1.000 - Micro F1: 1.000 - Weighted F1: 1.000 - Macro Precision: 1.000 - Micro Precision: 1.000 - Weighted Precision: 1.000 - Macro Recall: 1.000 - Micro Recall: 1.000 - Weighted Recall: 1.000
[ "animals", "dance", "food", "sport", "tech" ]
sasha/autotrain-butterfly-similarity-2490576840
# Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 2490576840 - CO2 Emissions (in grams): 21.2638 ## Validation Metrics - Loss: 1.818 - Accuracy: 0.609 - Macro F1: 0.409 - Micro F1: 0.609 - Weighted F1: 0.559 - Macro Precision: 0.404 - Micro Precision: 0.609 - Weighted Precision: 0.542 - Macro Recall: 0.446 - Micro Recall: 0.609 - Weighted Recall: 0.609
[ "abaeis mexicana", "abaeis nicippe", "adelpha pithys", "callophrys dumetorum", "callophrys eryphon", "callophrys gryneus", "callophrys henrici", "callophrys hesseli", "callophrys irus", "callophrys lanoraieensis", "callophrys muiri", "callophrys niphon", "callophrys polios", "aglais caschmirensis", "callophrys rubi", "callophrys spinetorum", "calpodes ethlius", "calycopis cecrops", "calycopis isobeon", "calydna sturnula", "caria castalia", "carterocephalus mandan", "carterocephalus palaemon", "carterocephalus silvicola", "aglais io", "carterocephalus skada", "castalius rosimon", "catagramma pygas", "catasticta nimbice nimbice", "catopsilia pomona", "celastrina argiolus", "celastrina echo", "celastrina lucia", "celastrina neglecta", "celotes nessus", "aglais milberti", "cercyonis pegala", "cethosia cyane", "charaxes bernardus", "charaxes jasius", "charaxes sempronius", "charaxes solon", "charis anius", "chiomara georgina", "chiomara georgina georgina", "chlosyne acastus", "aglais rizana", "chlosyne californica", "chlosyne cyneas", "chlosyne ehrenbergii", "chlosyne harrisii", "chlosyne lacinia", "chlosyne lacinia crocale", "chlosyne lacinia lacinia", "chlosyne nycteis", "chlosyne palla", "chlosyne theona", "aglais urticae", "chlosyne theona minimus", "cigaritis lohita", "cigaritis natalensis", "cigaritis syama", "cigaritis vulcanus", "coenonympha california", "coenonympha pamphilus", "colias croceus", "colias eurytheme", "colias philodice", "agriades glandon", "colobura dirce", "colobura dirce dirce", "colotis fausta", "colotis pallene", "copaeodes minima", "cressida cressida", "cupido amyntula", "cupido argiades", "cupido comyntas", "cyllopsis pertepida", "altinote ozomene", "cyrestis nivea", "cyrestis thyodamas", "danaus chrysippus", "danaus chrysippus orientis", "danaus eresimus", "danaus erippus", "danaus genutia", "danaus gilippus", "danaus gilippus thersippus", "danaus melanippus", "amarynthis meneria", "danaus petilia", "danaus plexippus", "danaus plexippus 
plexippus", "delias acalis", "delias eucharis", "delias harpalyce", "delias hyparete", "delias pasithoe", "diaethria anna", "diaethria candrena", "amathusia phidippus", "diaethria clymena", "dichorragia nesimachus", "dione juno", "dione moneta", "dione moneta poeyii", "dione vanillae", "dione vanillae incarnata", "dione vanillae maculosa", "dismorphiinae", "dispar compacta", "abantis paradisea", "anaea andria", "doxocopa kallina", "doxocopa laure", "doxocopa laurentia", "doxocopa pavon", "dryadula phaetusa", "dryas iulia", "dryas iulia moderata", "dynamine dyonis", "dynamine postverta", "dynamine tithia", "anartia amathea", "eantis tamenund", "echinargus isola", "electrostrymon angelia", "elymnias caudata", "elymnias hypermnestra", "epargyreus clarus", "ephyriades brunnea", "epiphile epimenes", "epiphile orea", "episcada hymenaea", "anartia fatima", "epityches eupompe", "erebia aethiops", "erebia epipsodea", "erebia ligea", "erebia medusa", "erebia pronoe", "erora laeta", "erynnis baptisiae", "erynnis funeralis", "erynnis horatius", "anartia fatima fatima", "erynnis icelus", "erynnis juvenalis", "erynnis propertius", "erynnis tages", "erynnis tristis", "erynnis zarucco", "euchrysops cnejus", "eueides isabella", "eueides isabella dianasa", "eumaeus atala", "anartia jatrophae", "eumaeus childrenae", "eumaeus toxea", "eunica monima", "euphaedra neophron", "euphilotes bernardino", "euphilotes enoptes bayensis", "euphydryas anicia", "euphydryas aurinia", "euphydryas chalcedona", "euphydryas editha", "anatrytone logan", "euphydryas editha bayensis", "euphydryas gillettii", "euphydryas maturna", "euphydryas phaeton", "euphyes dion", "euphyes vestris", "euploea midamus", "euploea mulciber", "euptoieta claudia", "euptoieta hegesia", "ancyloxypha numitor", "euptoieta hegesia meridiania", "eurema daira", "eurema hecabe", "euripus nyctelius", "eurybia lycisca", "eurytides marcellus", "eurytides philolaus", "eurytides philolaus philolaus", "euthalia aconthea", "euthalia 
lubentina", "anteos maerula", "euthalia monina monina", "euthalia nais", "evenus regalis", "favonius quercus", "feniseca tarquinius", "fountainea nessus", "freyeria putli", "glaucopsyche alexis", "glaucopsyche lygdamus", "glutophrissa drusilla", "anteros carausius", "glutophrissa drusilla tenuis", "gonepteryx cleopatra", "gonepteryx rhamni", "graphium agamemnon", "graphium antiphates", "graphium colonna", "graphium doson", "graphium eurypylus", "graphium macleayanus", "graphium nomius", "anthanassa nebulosa alexon", "graphium policenes", "graphium sarpedon", "graphium teredon", "greta morgane oto", "haemactis sanguinalis", "hamadryas amphinome", "hamadryas epinome", "hamadryas februa", "hamadryas feronia", "hamadryas laodamia", "acraea horta", "anthanassa texana", "hamearis lucina", "heliconius charithonia", "heliconius charithonia vazquezae", "heliconius doris", "heliconius erato", "heliconius erato cruentus", "heliconius ethilla", "heliconius ethilla narcaea", "heliconius hecale", "heliconius hortense", "anthanassa tulcis", "heliopetes ericetorum", "heliophorus epicles", "heliophorus moorei", "heliophorus sena", "hermeuptychia intricata", "hermeuptychia sosybius", "hesperiinae", "hestina assimilis assimilis", "heteronympha merope", "heteropterus morpheus", "anthocharis cardamines", "hylephila phyleus", "hypanartia lethe", "hypaurotis crysalus", "hypolimnas anthedon", "hypolimnas bolina", "hypolimnas bolina kezia", "hypolimnas bolina nerina", "hypolimnas dexithea", "hypolimnas misippus", "hypophylla zeurippa", "anthocharis sara", "hypothyris euclea", "icaricia acmon", "icaricia icarioides", "icaricia lupini", "idea stolli", "ideopsis similis", "iphiclides feisthamelii", "iphiclides podalirius", "iraota timoleon", "issoria lathonia", "apatura ilia", "ithomia agnosia", "ixias pyrene", "jamides celeno", "juditha caucana", "junonia almana", "junonia coenia", "junonia coenia coenia", "junonia genoveva hilaris", "junonia grisea", "junonia hierta", "apatura iris", 
"junonia hierta cebrene", "junonia iphita", "junonia lemonias", "junonia oenone", "junonia oenone oenone", "junonia orithya", "junonia orithya madagascariensis", "junonia rhadama", "junonia sophia", "junonia villida", "apatura metis", "kallima inachus", "kaniska canace", "lampides boeticus", "lamproptera meges", "lasaia agesilas", "lasaia sula peninsularis", "lasiommata maera", "lasiommata megera", "laxita teneta", "lebadea martha", "aphantopus hyperantus", "leptidea", "leptidea sinapis", "leptotes cassius", "leptotes marina", "leptotes plinius", "lerema accius", "lethe anthedon", "lethe appalachia", "lethe eurydice", "lethe portlandia", "apodemia mejicanus deserti", "lexias dirtea", "lexias dirtea merguia", "lexias pardalis", "libythea celtis", "libytheana carinenta", "limenitis archippus", "limenitis archippus floridensis", "limenitis arthemis", "limenitis arthemis arizonensis", "limenitis arthemis arthemis", "apodemia mormo", "limenitis arthemis arthemis × astyanax", "limenitis arthemis astyanax", "limenitis arthemis rubrofasciata", "limenitis arthemis x archippus", "limenitis camilla", "limenitis lorquini", "limenitis populi", "limenitis reducta", "limenitis weidemeyerii", "lon hobomok", "acraea issoria", "apodemia virgulti", "lon zabulon", "lopinga achine", "luthrodes pandava", "lycaena", "lycaena alciphron", "lycaena boldenarum", "lycaena cupreus", "lycaena dispar", "lycaena hippothoe", "lycaena phlaeas", "aporia crataegi", "lycaena phlaeas hypophlaeas", "lycaena salustius", "lycaena tityrus", "lycaena virgaureae", "lycorea halia", "lyropteryx apollonia", "maniola jurtina", "marpesia chiron", "marpesia corinna", "marpesia petreus", "araschnia levana", "matapa aria", "mechanitis polymnia", "megisto cymela", "melanargia galathea", "melanargia lachesis", "melanargia russiae", "melanis acroleuca", "melanis acroleuca acroleuca", "melanis cephise", "melanis electron", "arawacus meliboeus", "melanis pixe", "melanitis leda", "melanocyma faunula", "melitaea", 
"melitaea athalia", "melitaea cinxia", "melitaea diamina", "melitaea didyma", "melitaea phoebe", "melitaea trivia", "arawacus separata", "mesosemia lamachus", "mestra amymone", "methona themisto", "microtia elva", "minois dryas", "moduza procris", "morpho helenor", "mycalesis", "mylothris agathina agathina", "myscelia cyananthe", "argopteron aureipennis", "myscelia ethusa", "myscelia orsis", "nathalis iole", "neophasia menapia", "neophasia terlooii", "neptis hylas", "neptis sappho", "nymphalis antiopa", "nymphalis antiopa antiopa", "nymphalis californica", "argynnis adippe", "nymphalis l-album", "nymphalis polychloros", "nymphalis xanthomelas", "nymphidium mantus", "ochlodes sylvanoides", "ochlodes sylvanus", "oleria paula", "opsiphanes cassina fabricii", "oreixenica kershawi", "ornithoptera euphorion", "argynnis hyperbius", "ortilia ithra", "paches loxus", "paches polla", "pachliopta aristolochiae", "panthiades bathildis", "pantoporia sandaka", "papilio aegeus", "papilio alcmenor", "papilio alexanor", "papilio alexiares garcia", "argynnis pandora", "papilio anactus", "papilio androgeus epidaurus", "papilio appalachiensis", "papilio astyalus", "papilio bianor", "papilio brevicauda", "papilio canadensis", "papilio canadensis × glaucus", "papilio cleotas", "papilio clytia", "argynnis paphia", "papilio cresphontes", "papilio crino", "papilio demoleus", "papilio eurymedon", "papilio garamas garamas", "papilio glaucus", "papilio hectorides", "papilio machaon", "papilio machaon bairdii", "papilio memnon", "acraea terpsicore", "argyrophenga antipodum", "papilio menatius victorinus", "papilio multicaudata", "papilio multicaudata multicaudata", "papilio nireus lyaeus", "papilio palamedes", "papilio paris", "papilio polymnestor", "papilio polytes", "papilio polyxenes", "papilio polyxenes asterius", "arhopala amantes", "papilio rogeri pharnaces", "papilio rumiko", "papilio rutulus", "papilio thoas", "papilio thoas autocles", "papilio troilus", "papilio xuthus", "papilio 
zalmoxis", "papilio zelicaon", "parantica aglea", "ariadne ariadne", "pararge aegeria", "paratrytone snowi", "pareronia hippia", "pareronia valeria", "parides neophilus", "parides photinus", "parnassius apollo", "parnassius clodius", "parnassius mnemosyne", "parnassius phoebus", "aricia agestis", "parnassius smintheus", "parrhasius m-album", "parthenos sylvia", "parvospila emylius", "pelopidas mathias", "phaedyma columella singa", "phalanta phalantha", "phengaris arion", "philaethria wernickei", "philotes sonorensis", "aricia cramera", "phocides belus", "phocides lilea", "phocides pigmalion", "phoebis philea", "phoebis sennae", "phyciodes", "phyciodes cocyta", "phyciodes mylitta", "phyciodes pallescens", "phyciodes phaon", "ascia monuste", "phyciodes picta", "phyciodes pulchella", "phyciodes tharos", "pierella luna", "pieris", "pieris brassicae", "pieris napi", "pieris oleracea", "pieris rapae", "piruna roeveri", "asterocampa celtis", "placidina euryanassa", "plebejus argus", "plebejus idas", "plebejus melissa", "poanes viator", "polites peckius", "polites puxillius", "polites themistocles", "polites vibex", "polygonia", "asterocampa clyton", "polygonia c-album", "polygonia comma", "polygonia egea", "polygonia faunus", "polygonia gracilis", "polygonia interrogationis", "polygonia progne", "polygonia satyrus", "polyommatus bellargus", "polyommatus coridon", "astraptes fulgerator", "polyommatus damon", "polyommatus icarus", "pontia protodice", "potanthus", "potanthus omaha", "precis archesia archesia", "precis octavia sesamus", "prepona laertes", "prioneris thestylis", "prosotas dubiosa", "atalopedes campestris", "protographium epidaus", "protographium epidaus epidaus", "pseudacraea lucretia expansa", "pseudohaetera hypaesia", "pseudozizeeria maha", "psychonotis caelius", "pterourus", "pyrgus malvae", "pyrgus ruralis", "pyrisitia lisa", "actinote anteas", "athyma inara", "pyronia tithonus", "pyrrhogyra neaerea hypsenor", "pythonides lancea", "ragadia makuta", "rapala 
iarbus", "rapala manea", "rapala varuna", "rathinda amor", "rekoa marius", "rekoa meton", "athyma selenophora", "rhabdodryas trite", "rhetus arcius", "rhetus arcius thia", "rhetus dysonii", "rhetus periander", "riodina lycisca", "sarangesa motozi", "sarota acantus", "sarota chrysus", "satyrium calanus", "atlides halesus", "satyrium caryaevorus", "satyrium edwardsii", "satyrium spini", "satyrium titus", "satyrium w-album", "scolitantides orion", "siproeta epaphus", "siproeta stelenes", "siproeta stelenes biplagiata", "smyrna blomfildia", "atlides polybe", "sostrata cronion", "speyeria adiaste", "speyeria aglaja", "speyeria aphrodite", "speyeria atlantis", "speyeria callippe", "speyeria coronis", "speyeria cybele", "speyeria diana", "speyeria hesperis", "baeotis zonata", "speyeria hydaspe", "speyeria idalia", "speyeria mormonia", "spialia sertorius", "stibochiona nicea", "strephonota tephraeus", "strymon astiocha", "strymon melinus", "symbrenthia lilaea", "symmachia accusatrix", "barbicornis basilis", "tagiades litigiosa", "talicada nyseus", "tanaecia pelea pelea", "tarucus thespis", "taxila haquinus", "tegosa claudina", "texola elada", "tharsalea arota", "tharsalea dorcas", "tharsalea gorgon", "baronia brevicornis brevicornis", "tharsalea helloides", "tharsalea heteronea", "tharsalea hyllus", "tharsalea xanthoides", "thecla betulae", "theclinesthes serpentata", "thorybes dorantes", "thorybes lyciades", "thorybes pylades", "thymelicus", "battus philenor", "thymelicus lineola", "thymelicus sylvestris", "tirumala hamata", "tirumala limniace", "tirumala petiverana", "tirumala septentrionis", "tisiphone abeona", "toxidia doubledayi", "trapezites eliena", "trapezites symmomus", "battus philenor hirsuta", "urbanus proteus", "vanessa", "vanessa annabella", "vanessa atalanta", "vanessa braziliensis", "vanessa cardui", "vanessa carye", "vanessa gonerilla", "vanessa gonerilla gonerilla", "vanessa itea", "battus philenor philenor", "vanessa kershawi", "vanessa terpsichore", 
"vanessa virginiensis", "vanessa vulcania", "vindula erota", "yanguna cosyra", "ypthima baldus", "ypthima huebneri", "zemeros flegyas", "zerene cesonia", "adelpha californica", "battus polydamas", "zerene eurydice", "zerynthia cassandra", "zerynthia polyxena", "zerynthia rumina", "zesius chrysomallus", "zizina otis", "zizina otis labradus", "zizina oxleyi", "behemothia godmanii", "belenois aurota aurota", "belenois java", "biblis aganisa", "biblis hyperia", "boloria alaskensis halli", "boloria bellona", "boloria chariclea", "boloria dia", "adelpha eulalia", "boloria eunomia", "boloria euphrosyne", "boloria freija", "boloria polaris", "boloria selene", "brenthis daphne", "brephidium exilis", "brintesia circe", "burnsius", "burnsius albescens", "adelpha mythra", "burnsius communis", "burnsius oileus", "burnsius orcus", "byblia ilithyia", "cacyreus marshalli", "calephelis", "calephelis borealis", "calephelis nemesis", "calephelis virginiensis", "callophrys augustinus" ]
sasha/autotrain-butterfly_similarity_swin-2490776951
# Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 2490776951 - CO2 Emissions (in grams): 28.2960 ## Validation Metrics - Loss: 1.385 - Accuracy: 0.689 - Macro F1: 0.488 - Micro F1: 0.689 - Weighted F1: 0.641 - Macro Precision: 0.483 - Micro Precision: 0.689 - Weighted Precision: 0.628 - Macro Recall: 0.528 - Micro Recall: 0.689 - Weighted Recall: 0.689
[ "abaeis mexicana", "abaeis nicippe", "adelpha pithys", "callophrys dumetorum", "callophrys eryphon", "callophrys gryneus", "callophrys henrici", "callophrys hesseli", "callophrys irus", "callophrys lanoraieensis", "callophrys muiri", "callophrys niphon", "callophrys polios", "aglais caschmirensis", "callophrys rubi", "callophrys spinetorum", "calpodes ethlius", "calycopis cecrops", "calycopis isobeon", "calydna sturnula", "caria castalia", "carterocephalus mandan", "carterocephalus palaemon", "carterocephalus silvicola", "aglais io", "carterocephalus skada", "castalius rosimon", "catagramma pygas", "catasticta nimbice nimbice", "catopsilia pomona", "celastrina argiolus", "celastrina echo", "celastrina lucia", "celastrina neglecta", "celotes nessus", "aglais milberti", "cercyonis pegala", "cethosia cyane", "charaxes bernardus", "charaxes jasius", "charaxes sempronius", "charaxes solon", "charis anius", "chiomara georgina", "chiomara georgina georgina", "chlosyne acastus", "aglais rizana", "chlosyne californica", "chlosyne cyneas", "chlosyne ehrenbergii", "chlosyne harrisii", "chlosyne lacinia", "chlosyne lacinia crocale", "chlosyne lacinia lacinia", "chlosyne nycteis", "chlosyne palla", "chlosyne theona", "aglais urticae", "chlosyne theona minimus", "cigaritis lohita", "cigaritis natalensis", "cigaritis syama", "cigaritis vulcanus", "coenonympha california", "coenonympha pamphilus", "colias croceus", "colias eurytheme", "colias philodice", "agriades glandon", "colobura dirce", "colobura dirce dirce", "colotis fausta", "colotis pallene", "copaeodes minima", "cressida cressida", "cupido amyntula", "cupido argiades", "cupido comyntas", "cyllopsis pertepida", "altinote ozomene", "cyrestis nivea", "cyrestis thyodamas", "danaus chrysippus", "danaus chrysippus orientis", "danaus eresimus", "danaus erippus", "danaus genutia", "danaus gilippus", "danaus gilippus thersippus", "danaus melanippus", "amarynthis meneria", "danaus petilia", "danaus plexippus", "danaus plexippus 
plexippus", "delias acalis", "delias eucharis", "delias harpalyce", "delias hyparete", "delias pasithoe", "diaethria anna", "diaethria candrena", "amathusia phidippus", "diaethria clymena", "dichorragia nesimachus", "dione juno", "dione moneta", "dione moneta poeyii", "dione vanillae", "dione vanillae incarnata", "dione vanillae maculosa", "dismorphiinae", "dispar compacta", "abantis paradisea", "anaea andria", "doxocopa kallina", "doxocopa laure", "doxocopa laurentia", "doxocopa pavon", "dryadula phaetusa", "dryas iulia", "dryas iulia moderata", "dynamine dyonis", "dynamine postverta", "dynamine tithia", "anartia amathea", "eantis tamenund", "echinargus isola", "electrostrymon angelia", "elymnias caudata", "elymnias hypermnestra", "epargyreus clarus", "ephyriades brunnea", "epiphile epimenes", "epiphile orea", "episcada hymenaea", "anartia fatima", "epityches eupompe", "erebia aethiops", "erebia epipsodea", "erebia ligea", "erebia medusa", "erebia pronoe", "erora laeta", "erynnis baptisiae", "erynnis funeralis", "erynnis horatius", "anartia fatima fatima", "erynnis icelus", "erynnis juvenalis", "erynnis propertius", "erynnis tages", "erynnis tristis", "erynnis zarucco", "euchrysops cnejus", "eueides isabella", "eueides isabella dianasa", "eumaeus atala", "anartia jatrophae", "eumaeus childrenae", "eumaeus toxea", "eunica monima", "euphaedra neophron", "euphilotes bernardino", "euphilotes enoptes bayensis", "euphydryas anicia", "euphydryas aurinia", "euphydryas chalcedona", "euphydryas editha", "anatrytone logan", "euphydryas editha bayensis", "euphydryas gillettii", "euphydryas maturna", "euphydryas phaeton", "euphyes dion", "euphyes vestris", "euploea midamus", "euploea mulciber", "euptoieta claudia", "euptoieta hegesia", "ancyloxypha numitor", "euptoieta hegesia meridiania", "eurema daira", "eurema hecabe", "euripus nyctelius", "eurybia lycisca", "eurytides marcellus", "eurytides philolaus", "eurytides philolaus philolaus", "euthalia aconthea", "euthalia 
lubentina", "anteos maerula", "euthalia monina monina", "euthalia nais", "evenus regalis", "favonius quercus", "feniseca tarquinius", "fountainea nessus", "freyeria putli", "glaucopsyche alexis", "glaucopsyche lygdamus", "glutophrissa drusilla", "anteros carausius", "glutophrissa drusilla tenuis", "gonepteryx cleopatra", "gonepteryx rhamni", "graphium agamemnon", "graphium antiphates", "graphium colonna", "graphium doson", "graphium eurypylus", "graphium macleayanus", "graphium nomius", "anthanassa nebulosa alexon", "graphium policenes", "graphium sarpedon", "graphium teredon", "greta morgane oto", "haemactis sanguinalis", "hamadryas amphinome", "hamadryas epinome", "hamadryas februa", "hamadryas feronia", "hamadryas laodamia", "acraea horta", "anthanassa texana", "hamearis lucina", "heliconius charithonia", "heliconius charithonia vazquezae", "heliconius doris", "heliconius erato", "heliconius erato cruentus", "heliconius ethilla", "heliconius ethilla narcaea", "heliconius hecale", "heliconius hortense", "anthanassa tulcis", "heliopetes ericetorum", "heliophorus epicles", "heliophorus moorei", "heliophorus sena", "hermeuptychia intricata", "hermeuptychia sosybius", "hesperiinae", "hestina assimilis assimilis", "heteronympha merope", "heteropterus morpheus", "anthocharis cardamines", "hylephila phyleus", "hypanartia lethe", "hypaurotis crysalus", "hypolimnas anthedon", "hypolimnas bolina", "hypolimnas bolina kezia", "hypolimnas bolina nerina", "hypolimnas dexithea", "hypolimnas misippus", "hypophylla zeurippa", "anthocharis sara", "hypothyris euclea", "icaricia acmon", "icaricia icarioides", "icaricia lupini", "idea stolli", "ideopsis similis", "iphiclides feisthamelii", "iphiclides podalirius", "iraota timoleon", "issoria lathonia", "apatura ilia", "ithomia agnosia", "ixias pyrene", "jamides celeno", "juditha caucana", "junonia almana", "junonia coenia", "junonia coenia coenia", "junonia genoveva hilaris", "junonia grisea", "junonia hierta", "apatura iris", 
"junonia hierta cebrene", "junonia iphita", "junonia lemonias", "junonia oenone", "junonia oenone oenone", "junonia orithya", "junonia orithya madagascariensis", "junonia rhadama", "junonia sophia", "junonia villida", "apatura metis", "kallima inachus", "kaniska canace", "lampides boeticus", "lamproptera meges", "lasaia agesilas", "lasaia sula peninsularis", "lasiommata maera", "lasiommata megera", "laxita teneta", "lebadea martha", "aphantopus hyperantus", "leptidea", "leptidea sinapis", "leptotes cassius", "leptotes marina", "leptotes plinius", "lerema accius", "lethe anthedon", "lethe appalachia", "lethe eurydice", "lethe portlandia", "apodemia mejicanus deserti", "lexias dirtea", "lexias dirtea merguia", "lexias pardalis", "libythea celtis", "libytheana carinenta", "limenitis archippus", "limenitis archippus floridensis", "limenitis arthemis", "limenitis arthemis arizonensis", "limenitis arthemis arthemis", "apodemia mormo", "limenitis arthemis arthemis × astyanax", "limenitis arthemis astyanax", "limenitis arthemis rubrofasciata", "limenitis arthemis x archippus", "limenitis camilla", "limenitis lorquini", "limenitis populi", "limenitis reducta", "limenitis weidemeyerii", "lon hobomok", "acraea issoria", "apodemia virgulti", "lon zabulon", "lopinga achine", "luthrodes pandava", "lycaena", "lycaena alciphron", "lycaena boldenarum", "lycaena cupreus", "lycaena dispar", "lycaena hippothoe", "lycaena phlaeas", "aporia crataegi", "lycaena phlaeas hypophlaeas", "lycaena salustius", "lycaena tityrus", "lycaena virgaureae", "lycorea halia", "lyropteryx apollonia", "maniola jurtina", "marpesia chiron", "marpesia corinna", "marpesia petreus", "araschnia levana", "matapa aria", "mechanitis polymnia", "megisto cymela", "melanargia galathea", "melanargia lachesis", "melanargia russiae", "melanis acroleuca", "melanis acroleuca acroleuca", "melanis cephise", "melanis electron", "arawacus meliboeus", "melanis pixe", "melanitis leda", "melanocyma faunula", "melitaea", 
"melitaea athalia", "melitaea cinxia", "melitaea diamina", "melitaea didyma", "melitaea phoebe", "melitaea trivia", "arawacus separata", "mesosemia lamachus", "mestra amymone", "methona themisto", "microtia elva", "minois dryas", "moduza procris", "morpho helenor", "mycalesis", "mylothris agathina agathina", "myscelia cyananthe", "argopteron aureipennis", "myscelia ethusa", "myscelia orsis", "nathalis iole", "neophasia menapia", "neophasia terlooii", "neptis hylas", "neptis sappho", "nymphalis antiopa", "nymphalis antiopa antiopa", "nymphalis californica", "argynnis adippe", "nymphalis l-album", "nymphalis polychloros", "nymphalis xanthomelas", "nymphidium mantus", "ochlodes sylvanoides", "ochlodes sylvanus", "oleria paula", "opsiphanes cassina fabricii", "oreixenica kershawi", "ornithoptera euphorion", "argynnis hyperbius", "ortilia ithra", "paches loxus", "paches polla", "pachliopta aristolochiae", "panthiades bathildis", "pantoporia sandaka", "papilio aegeus", "papilio alcmenor", "papilio alexanor", "papilio alexiares garcia", "argynnis pandora", "papilio anactus", "papilio androgeus epidaurus", "papilio appalachiensis", "papilio astyalus", "papilio bianor", "papilio brevicauda", "papilio canadensis", "papilio canadensis × glaucus", "papilio cleotas", "papilio clytia", "argynnis paphia", "papilio cresphontes", "papilio crino", "papilio demoleus", "papilio eurymedon", "papilio garamas garamas", "papilio glaucus", "papilio hectorides", "papilio machaon", "papilio machaon bairdii", "papilio memnon", "acraea terpsicore", "argyrophenga antipodum", "papilio menatius victorinus", "papilio multicaudata", "papilio multicaudata multicaudata", "papilio nireus lyaeus", "papilio palamedes", "papilio paris", "papilio polymnestor", "papilio polytes", "papilio polyxenes", "papilio polyxenes asterius", "arhopala amantes", "papilio rogeri pharnaces", "papilio rumiko", "papilio rutulus", "papilio thoas", "papilio thoas autocles", "papilio troilus", "papilio xuthus", "papilio 
zalmoxis", "papilio zelicaon", "parantica aglea", "ariadne ariadne", "pararge aegeria", "paratrytone snowi", "pareronia hippia", "pareronia valeria", "parides neophilus", "parides photinus", "parnassius apollo", "parnassius clodius", "parnassius mnemosyne", "parnassius phoebus", "aricia agestis", "parnassius smintheus", "parrhasius m-album", "parthenos sylvia", "parvospila emylius", "pelopidas mathias", "phaedyma columella singa", "phalanta phalantha", "phengaris arion", "philaethria wernickei", "philotes sonorensis", "aricia cramera", "phocides belus", "phocides lilea", "phocides pigmalion", "phoebis philea", "phoebis sennae", "phyciodes", "phyciodes cocyta", "phyciodes mylitta", "phyciodes pallescens", "phyciodes phaon", "ascia monuste", "phyciodes picta", "phyciodes pulchella", "phyciodes tharos", "pierella luna", "pieris", "pieris brassicae", "pieris napi", "pieris oleracea", "pieris rapae", "piruna roeveri", "asterocampa celtis", "placidina euryanassa", "plebejus argus", "plebejus idas", "plebejus melissa", "poanes viator", "polites peckius", "polites puxillius", "polites themistocles", "polites vibex", "polygonia", "asterocampa clyton", "polygonia c-album", "polygonia comma", "polygonia egea", "polygonia faunus", "polygonia gracilis", "polygonia interrogationis", "polygonia progne", "polygonia satyrus", "polyommatus bellargus", "polyommatus coridon", "astraptes fulgerator", "polyommatus damon", "polyommatus icarus", "pontia protodice", "potanthus", "potanthus omaha", "precis archesia archesia", "precis octavia sesamus", "prepona laertes", "prioneris thestylis", "prosotas dubiosa", "atalopedes campestris", "protographium epidaus", "protographium epidaus epidaus", "pseudacraea lucretia expansa", "pseudohaetera hypaesia", "pseudozizeeria maha", "psychonotis caelius", "pterourus", "pyrgus malvae", "pyrgus ruralis", "pyrisitia lisa", "actinote anteas", "athyma inara", "pyronia tithonus", "pyrrhogyra neaerea hypsenor", "pythonides lancea", "ragadia makuta", "rapala 
iarbus", "rapala manea", "rapala varuna", "rathinda amor", "rekoa marius", "rekoa meton", "athyma selenophora", "rhabdodryas trite", "rhetus arcius", "rhetus arcius thia", "rhetus dysonii", "rhetus periander", "riodina lycisca", "sarangesa motozi", "sarota acantus", "sarota chrysus", "satyrium calanus", "atlides halesus", "satyrium caryaevorus", "satyrium edwardsii", "satyrium spini", "satyrium titus", "satyrium w-album", "scolitantides orion", "siproeta epaphus", "siproeta stelenes", "siproeta stelenes biplagiata", "smyrna blomfildia", "atlides polybe", "sostrata cronion", "speyeria adiaste", "speyeria aglaja", "speyeria aphrodite", "speyeria atlantis", "speyeria callippe", "speyeria coronis", "speyeria cybele", "speyeria diana", "speyeria hesperis", "baeotis zonata", "speyeria hydaspe", "speyeria idalia", "speyeria mormonia", "spialia sertorius", "stibochiona nicea", "strephonota tephraeus", "strymon astiocha", "strymon melinus", "symbrenthia lilaea", "symmachia accusatrix", "barbicornis basilis", "tagiades litigiosa", "talicada nyseus", "tanaecia pelea pelea", "tarucus thespis", "taxila haquinus", "tegosa claudina", "texola elada", "tharsalea arota", "tharsalea dorcas", "tharsalea gorgon", "baronia brevicornis brevicornis", "tharsalea helloides", "tharsalea heteronea", "tharsalea hyllus", "tharsalea xanthoides", "thecla betulae", "theclinesthes serpentata", "thorybes dorantes", "thorybes lyciades", "thorybes pylades", "thymelicus", "battus philenor", "thymelicus lineola", "thymelicus sylvestris", "tirumala hamata", "tirumala limniace", "tirumala petiverana", "tirumala septentrionis", "tisiphone abeona", "toxidia doubledayi", "trapezites eliena", "trapezites symmomus", "battus philenor hirsuta", "urbanus proteus", "vanessa", "vanessa annabella", "vanessa atalanta", "vanessa braziliensis", "vanessa cardui", "vanessa carye", "vanessa gonerilla", "vanessa gonerilla gonerilla", "vanessa itea", "battus philenor philenor", "vanessa kershawi", "vanessa terpsichore", 
"vanessa virginiensis", "vanessa vulcania", "vindula erota", "yanguna cosyra", "ypthima baldus", "ypthima huebneri", "zemeros flegyas", "zerene cesonia", "adelpha californica", "battus polydamas", "zerene eurydice", "zerynthia cassandra", "zerynthia polyxena", "zerynthia rumina", "zesius chrysomallus", "zizina otis", "zizina otis labradus", "zizina oxleyi", "behemothia godmanii", "belenois aurota aurota", "belenois java", "biblis aganisa", "biblis hyperia", "boloria alaskensis halli", "boloria bellona", "boloria chariclea", "boloria dia", "adelpha eulalia", "boloria eunomia", "boloria euphrosyne", "boloria freija", "boloria polaris", "boloria selene", "brenthis daphne", "brephidium exilis", "brintesia circe", "burnsius", "burnsius albescens", "adelpha mythra", "burnsius communis", "burnsius oileus", "burnsius orcus", "byblia ilithyia", "cacyreus marshalli", "calephelis", "calephelis borealis", "calephelis nemesis", "calephelis virginiensis", "callophrys augustinus" ]
sasha/autotrain-sea-slug-similarity-2498977005
# Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 2498977005 - CO2 Emissions (in grams): 13.7591 ## Validation Metrics - Loss: 0.757 - Accuracy: 0.837 - Macro F1: 0.778 - Micro F1: 0.837 - Weighted F1: 0.816 - Macro Precision: 0.787 - Micro Precision: 0.837 - Weighted Precision: 0.825 - Macro Recall: 0.796 - Micro Recall: 0.837 - Weighted Recall: 0.837
[ "(a. adams, 1855)", "(baba, 1938)", "(ruppell & leuckart, 1828)", "glaucus atlanticus", "glaucus marginatus", "glossodoris aeruginosa", "glossodoris atromarginata", "glossodoris averni", "glossodoris carlsoni", "glossodoris cincta", "glossodoris electra", "glossodoris hikuerensis", "glossodoris pullata", "(ruppell & leuckart, 1831)", "glossodoris rubroannulata", "glossodoris vespa", "godiva quadricolor", "goniodoridella savignyi", "goniodoris joubini", "gosliner & behrens, 2000", "gosliner & willan, 1991", "gosliner, 1989", "gosliner, 1995", "gymnodoris alba", "actinocyclus verrucosus", "gymnodoris bicolor", "gymnodoris cf. nigricolor", "gymnodoris okinawae", "halgerda albocristata", "halgerda aurantiomaculata", "halgerda tessellata", "halgerda willeyi", "hallaxa cryptica", "hallaxa fuscescens", "hallaxa iju", "aegires flores", "hallaxa indecora", "hallaxa translucens", "haminoea fusca", "hedley, 1902", "herviella albida", "herviella claror", "hexabranchus sanguineus", "hoplodoris nodulosa", "hydatina physis", "hypselodoris bullockii", "aegires gardineri", "hypselodoris jacksoni", "hypselodoris maritima", "hypselodoris obscura", "hypselodoris sagamiensis", "hypselodoris whitei", "ichikawa, 1993", "jensen, 1993", "jorunna pantherina", "kaloplocamus acutus", "lobiger souverbii", "aegires hapsis", "lomanotus vermiformis", "madrella ferruginosa", "marianina rosea", "marionia pustulosa", "mexichromis festiva", "mexichromis macropus", "micromelo undata", "miller, 1996", "nembrotha purpureolineata", "noumea alboannulata", "aegires incusus", "noumea crocea", "noumea flava", "noumea laboutei", "noumea norba", "noumea romeri", "noumea simplex", "noumea verconiforma", "odhner, 1941", "okenia hallucigenia", "okenia pellucida", "aegires villosus", "okenia plana", "oxynoe viridis", "pease, 1866", "pectenodoris trilineata", "phestilla melanobrachia", "phestilla minor", "phidiana bourailli", "phidiana indica", "philinopsis pilsbryi", "phyllidia ocellata", "aeolidiella alba", 
"phyllidia babai", "phyllidia coelestis", "phyllidia elegans", "phyllidia exquisita", "phyllidia picta", "phyllidia varicosa", "phyllidiella lizae", "phyllidiella pustulosa", "phyllidiopsis cardinalis", "phyllidiopsis fissurata", "analogium amakusanum", "phyllidiopsis loricata", "phyllodesmium colemani", "phyllodesmium crypticum", "phyllodesmium macphersonae", "phyllodesmium magnum", "placida cremoniana", "platydoris cruenta", "platydoris formosa", "platydoris inframaculata", "platydoris sabulosa", "(baba, 1955)", "aplysia dactylomela", "pleurobranchus albiguttatus", "pleurobranchus caledonicus", "pleurobranchus peronii", "plocamopherus ceylonicus", "plocamopherus imperialis", "polybranchia orientalis", "protaeolidiella juliae", "reticulidia halgerda", "risbecia godeffroyana", "risbecia tryoni", "aplysia parvula", "roboastra luteolineata", "rostanga arbutus", "rostanga bifurcata", "rudman, 1995", "runcina fijiensis", "sagaminopteron ornatum", "sagaminopteron psychedelicum", "sclerodoris apiculata", "sclerodoris cf. 
coriacea", "scyllaea pelagica", "aplysia sowerbyi", "siraius nucleola", "stylocheilus striatus", "tamanovalva limax", "tambja amakusana", "tambja limaciformis", "tambja morosa", "tambja tenuilineata", "tambja victoriae", "thompson 1972", "thorunna australis", "atagema albata", "thorunna daniellae", "thorunna florens", "thorunna halourga", "thuridilla albopustulosa", "thuridilla carlsoni", "thuridilla multimarginata", "thuridilla splendens", "thuridilla vatae", "trinchesia", "trinchesia sibogae", "atagema ornata", "trinchesia yamasui", "tritoniopsis alba", "tylodina corticalis", "vayssierea caledonica", "volvatella angeliniana", "atagema spongiosa", "austraeolis ornata", "baba & abe, 1970", "bergh, 1888", "bergh, 1905", "(baba,1955)", "berthella martensi", "berthella stellata", "berthellina citrina", "bornella anguilla", "bornella stellifer", "bulla vernicosa", "bullina lineata", "burghardt, 2006", "cadlinella ornatissima", "ceratosoma magnificum", "(bergh, 1890)", "ceratosoma moloch", "ceratosoma sinuatum", "ceratosoma tenue", "cerberilla affinis", "cerberilla ambonensis", "chelidonura electra", "chelidonura hirundinina", "chelidonura inornata", "chromodoris albonares", "chromodoris albopunctata", "(crosse & fischer, 1865)", "chromodoris aspersa", "chromodoris burni", "chromodoris coi", "chromodoris daphne", "chromodoris decora", "chromodoris elisabethina", "chromodoris geometrica", "chromodoris kuiteri", "chromodoris kuniei", "chromodoris leopardus", "(elliot, 1903)", "chromodoris magnifica", "chromodoris roboi", "chromodoris rufomaculata", "chromodoris splendida", "chromodoris striatella", "chromodoris strigata", "chromodoris tinctoria", "chromodoris willani", "colpodaspis thompsoni", "cratena cf. affinis", "(h. & a. adams, 1854)", "cratena simba", "crimora edwardsi", "cyerce cf. 
pavonina", "cyerce nigra", "dendrodoris albobrunnea", "dendrodoris coronata", "dendrodoris denisoni", "dendrodoris fumata", "dendrodoris nigra", "dermatobranchus fortunata", "(lightfoot, 1786)", "dermatobranchus nigropunctatus", "dermatobranchus ornatus", "dermatobranchus primus", "diaphorodoris mitsuii", "discodoris fragilis", "discodoris palma", "diversidoris aurantionodulosa", "dolabrifera brazieri", "dolabrifera dolabrifera", "doriopsis pecten", "(risbec, 1928)", "doto cf. pita", "durvilledoris pusilla", "elysia obtusa", "elysia verrucosa", "elysiella pusilla", "favorinus japonicus", "fiona pinnata", "flabellina bicolor", "flabellina bilas", "flabellina rubrolineata" ]
polejowska/vit-convnext-tiny-224-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-convnext-tiny-224-eurosat This model is a fine-tuned version of [facebook/convnext-tiny-224](https://huggingface.co/facebook/convnext-tiny-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.0576 - Accuracy: 0.9859 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.2881 | 0.99 | 147 | 0.2325 | 0.9588 | | 0.0869 | 1.99 | 294 | 0.0912 | 0.9753 | | 0.0687 | 2.99 | 441 | 0.0663 | 0.9805 | | 0.0272 | 3.99 | 588 | 0.0576 | 0.9859 | | 0.0247 | 4.99 | 735 | 0.0532 | 0.9854 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
[ "annualcrop", "forest", "herbaceousvegetation", "highway", "industrial", "pasture", "permanentcrop", "residential", "river", "sealake" ]
polejowska/vit-vit-base-patch16-224-in21k-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-vit-base-patch16-224-in21k-eurosat This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.0957 - Accuracy: 0.9886 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.3303 | 0.99 | 147 | 0.2950 | 0.9790 | | 0.1632 | 1.99 | 294 | 0.1593 | 0.9842 | | 0.1097 | 2.99 | 441 | 0.1223 | 0.9859 | | 0.0868 | 3.99 | 588 | 0.1053 | 0.9877 | | 0.0651 | 4.99 | 735 | 0.0957 | 0.9886 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
[ "annualcrop", "forest", "herbaceousvegetation", "highway", "industrial", "pasture", "permanentcrop", "residential", "river", "sealake" ]
kmewhort/beit-sketch-classifier
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # beit-sketch-classifier This model is a version of [microsoft/beit-base-patch16-224-pt22k-ft22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k-ft22k) fine-tuned on a dataset of Quick!Draw! sketches (~10% of [QuickDraw's 50M sketches](https://huggingface.co/datasets/kmewhort/quickdraw-bins-50M)). It achieves the following results on the evaluation set: - Loss: 0.7372 - Accuracy: 0.8098 ## Intended uses & limitations It's intended to be used to classify sketches with a line-segment input format (there's no data augmentation in the fine-tuning; the input raster images ideally need to be generated from line-vector format very similarly to the training images). You can generate the requisite PIL images from Quickdraw `bin` format with the following: ``` # packed bytes -> dict (from https://github.com/googlecreativelab/quickdraw-dataset/blob/master/examples/binary_file_parser.py) def unpack_drawing(file_handle): key_id, = unpack('Q', file_handle.read(8)) country_code, = unpack('2s', file_handle.read(2)) recognized, = unpack('b', file_handle.read(1)) timestamp, = unpack('I', file_handle.read(4)) n_strokes, = unpack('H', file_handle.read(2)) image = [] n_bytes = 17 for i in range(n_strokes): n_points, = unpack('H', file_handle.read(2)) fmt = str(n_points) + 'B' x = unpack(fmt, file_handle.read(n_points)) y = unpack(fmt, file_handle.read(n_points)) image.append((x, y)) n_bytes += 2 + 2*n_points result = { 'key_id': key_id, 'country_code': country_code, 'recognized': recognized, 'timestamp': timestamp, 'image': image, } return result # packed bin -> RGB PIL def binToPIL(packed_drawing): padding = 8 radius = 7 scale = (224.0-(2*padding)) / 256 unpacked = unpack_drawing(io.BytesIO(packed_drawing)) unpacked_image = unpacked['image'] image = np.full((224,224), 255, np.uint8) 
for stroke in unpacked['image']: prevX = round(stroke[0][0]*scale) prevY = round(stroke[1][0]*scale) for i in range(1, len(stroke[0])): x = round(stroke[0][i]*scale) y = round(stroke[1][i]*scale) cv2.line(image, (padding+prevX, padding+prevY), (padding+x, padding+y), 0, radius, -1) prevX = x prevY = y pilImage = Image.fromarray(image).convert("RGB") return pilImage ``` ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Accuracy | Validation Loss | |:-------------:|:-----:|:-----:|:--------:|:---------------:| | 0.939 | 1.0 | 12606 | 0.7853 | 0.8275 | | 0.7312 | 2.0 | 25212 | 0.7587 | 0.8027 | | 0.6174 | 3.0 | 37818 | 0.7372 | 0.8098 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.1+cu117 - Datasets 2.7.1 - Tokenizers 0.13.2
[ "the eiffel tower", "the great wall of china", "the mona lisa", "aircraft carrier", "airplane", "alarm clock", "ambulance", "angel", "animal migration", "ant", "anvil", "apple", "arm", "asparagus", "axe", "backpack", "banana", "bandage", "barn", "baseball", "baseball bat", "basket", "basketball", "bat", "bathtub", "beach", "bear", "beard", "bed", "bee", "belt", "bench", "bicycle", "binoculars", "bird", "birthday cake", "blackberry", "blueberry", "book", "boomerang", "bottlecap", "bowtie", "bracelet", "brain", "bread", "bridge", "broccoli", "broom", "bucket", "bulldozer", "bus", "bush", "butterfly", "cactus", "cake", "calculator", "calendar", "camel", "camera", "camouflage", "campfire", "candle", "cannon", "canoe", "car", "carrot", "castle", "cat", "ceiling fan", "cell phone", "cello", "chair", "chandelier", "church", "circle", "clarinet", "clock", "cloud", "coffee cup", "compass", "computer", "cookie", "cooler", "couch", "cow", "crab", "crayon", "crocodile", "crown", "cruise ship", "cup", "diamond", "dishwasher", "diving board", "dog", "dolphin", "donut", "door", "dragon", "dresser", "drill", "drums", "duck", "dumbbell", "ear", "elbow", "elephant", "envelope", "eraser", "eye", "eyeglasses", "face", "fan", "feather", "fence", "finger", "fire hydrant", "fireplace", "firetruck", "fish", "flamingo", "flashlight", "flip flops", "floor lamp", "flower", "flying saucer", "foot", "fork", "frog", "frying pan", "garden", "garden hose", "giraffe", "goatee", "golf club", "grapes", "grass", "guitar", "hamburger", "hammer", "hand", "harp", "hat", "headphones", "hedgehog", "helicopter", "helmet", "hexagon", "hockey puck", "hockey stick", "horse", "hospital", "hot air balloon", "hot dog", "hot tub", "hourglass", "house", "house plant", "hurricane", "ice cream", "jacket", "jail", "kangaroo", "key", "keyboard", "knee", "knife", "ladder", "lantern", "laptop", "leaf", "leg", "light bulb", "lighter", "lighthouse", "lightning", "line", "lion", "lipstick", "lobster", "lollipop", 
"mailbox", "map", "marker", "matches", "megaphone", "mermaid", "microphone", "microwave", "monkey", "moon", "mosquito", "motorbike", "mountain", "mouse", "moustache", "mouth", "mug", "mushroom", "nail", "necklace", "nose", "ocean", "octagon", "octopus", "onion", "oven", "owl", "paint can", "paintbrush", "palm tree", "panda", "pants", "paper clip", "parachute", "parrot", "passport", "peanut", "pear", "peas", "pencil", "penguin", "piano", "pickup truck", "picture frame", "pig", "pillow", "pineapple", "pizza", "pliers", "police car", "pond", "pool", "popsicle", "postcard", "potato", "power outlet", "purse", "rabbit", "raccoon", "radio", "rain", "rainbow", "rake", "remote control", "rhinoceros", "rifle", "river", "roller coaster", "rollerskates", "sailboat", "sandwich", "saw", "saxophone", "school bus", "scissors", "scorpion", "screwdriver", "sea turtle", "see saw", "shark", "sheep", "shoe", "shorts", "shovel", "sink", "skateboard", "skull", "skyscraper", "sleeping bag", "smiley face", "snail", "snake", "snorkel", "snowflake", "snowman", "soccer ball", "sock", "speedboat", "spider", "spoon", "spreadsheet", "square", "squiggle", "squirrel", "stairs", "star", "steak", "stereo", "stethoscope", "stitches", "stop sign", "stove", "strawberry", "streetlight", "string bean", "submarine", "suitcase", "sun", "swan", "sweater", "swing set", "sword", "syringe", "t-shirt", "table", "teapot", "teddy-bear", "telephone", "television", "tennis racquet", "tent", "tiger", "toaster", "toe", "toilet", "tooth", "toothbrush", "toothpaste", "tornado", "tractor", "traffic light", "train", "tree", "triangle", "trombone", "truck", "trumpet", "umbrella", "underwear", "van", "vase", "violin", "washing machine", "watermelon", "waterslide", "whale", "wheel", "windmill", "wine bottle", "wine glass", "wristwatch", "yoga", "zebra", "zigzag" ]
socokal/vit-base-beans
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-beans This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.0720 - Accuracy: 0.9774 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1111 | 1.54 | 100 | 0.0720 | 0.9774 | | 0.0249 | 3.08 | 200 | 0.1081 | 0.9774 | ### Framework versions - Transformers 4.21.1 - Pytorch 1.12.1 - Datasets 2.6.1 - Tokenizers 0.12.1
[ "angular_leaf_spot", "bean_rust", "healthy" ]
venuv62/spoofing_vit_16_224
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # spoofing_vit_16_224 This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.0560 - Accuracy: 0.7088 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 64 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.7746 | 0.99 | 54 | 0.6401 | 0.6405 | | 0.339 | 1.99 | 108 | 0.9389 | 0.6042 | | 0.0437 | 2.99 | 162 | 1.0560 | 0.7088 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
[ "imposter", "real" ]
venuv62/autotrained_spoof_detector
# Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 2522377421 - CO2 Emissions (in grams): 2.2511 ## Validation Metrics - Loss: 0.502 - Accuracy: 0.730 - Precision: 0.717 - Recall: 0.760 - AUC: 0.790 - F1: 0.738
[ "fake", "real" ]
venuv62/autotrain-rf_auto_gen-2522877431
# Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 2522877431 - CO2 Emissions (in grams): 2.1186 ## Validation Metrics - Loss: 0.572 - Accuracy: 0.790 - Precision: 0.872 - Recall: 0.680 - AUC: 0.854 - F1: 0.764
[ "fake", "real" ]
yaxue/vit-base-beans
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-beans This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.0840 - Accuracy: 0.9850 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 1337 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results | Training Loss | Epoch | Step | Accuracy | Validation Loss | |:-------------:|:-----:|:----:|:--------:|:---------------:| | 0.2538 | 1.0 | 130 | 0.9624 | 0.2253 | | 0.2861 | 2.0 | 260 | 0.9925 | 0.1086 | | 0.1625 | 3.0 | 390 | 0.0990 | 0.9925 | | 0.0932 | 4.0 | 520 | 0.0840 | 0.9850 | | 0.1535 | 5.0 | 650 | 0.0848 | 0.9850 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.1+cpu - Datasets 2.8.0 - Tokenizers 0.13.2
[ "angular_leaf_spot", "bean_rust", "healthy" ]
jonathanfernandes/vit-base-patch16-224-finetuned-flower
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-patch16-224-finetuned-flower This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the imagefolder dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results ### Framework versions - Transformers 4.24.0 - Pytorch 1.13.1+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
[ "daisy", "dandelion", "roses", "sunflowers", "tulips" ]
TheBirdLegacy/CatsandDogsPOC-Resnet
# Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 2540477800 - CO2 Emissions (in grams): 1.6189 ## Validation Metrics - Loss: 0.465 - Accuracy: 1.000 - Precision: 1.000 - Recall: 1.000 - AUC: 1.000 - F1: 1.000
[ "cat", "dog" ]
TheBirdLegacy/CatsandDogsPOC-Swin
Resnet is more lightweight but this is better in terms of loss, at the cost of being 3.5X the size. # Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 2540477801 - CO2 Emissions (in grams): 1.1656 ## Validation Metrics - Loss: 0.000 - Accuracy: 1.000 - Precision: 1.000 - Recall: 1.000 - AUC: 1.000 - F1: 1.000
[ "cat", "dog" ]
william7642/my_awesome_food_model
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my_awesome_food_model This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the food101 dataset. It achieves the following results on the evaluation set: - Loss: 1.0616 - Accuracy: 0.7962 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.9969 | 1.0 | 947 | 1.9538 | 0.7321 | | 1.1907 | 2.0 | 1894 | 1.2216 | 0.7806 | | 0.9433 | 3.0 | 2841 | 1.0616 | 0.7962 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
[ "apple_pie", "baby_back_ribs", "bruschetta", "waffles", "caesar_salad", "cannoli", "caprese_salad", "carrot_cake", "ceviche", "cheesecake", "cheese_plate", "chicken_curry", "chicken_quesadilla", "baklava", "chicken_wings", "chocolate_cake", "chocolate_mousse", "churros", "clam_chowder", "club_sandwich", "crab_cakes", "creme_brulee", "croque_madame", "cup_cakes", "beef_carpaccio", "deviled_eggs", "donuts", "dumplings", "edamame", "eggs_benedict", "escargots", "falafel", "filet_mignon", "fish_and_chips", "foie_gras", "beef_tartare", "french_fries", "french_onion_soup", "french_toast", "fried_calamari", "fried_rice", "frozen_yogurt", "garlic_bread", "gnocchi", "greek_salad", "grilled_cheese_sandwich", "beet_salad", "grilled_salmon", "guacamole", "gyoza", "hamburger", "hot_and_sour_soup", "hot_dog", "huevos_rancheros", "hummus", "ice_cream", "lasagna", "beignets", "lobster_bisque", "lobster_roll_sandwich", "macaroni_and_cheese", "macarons", "miso_soup", "mussels", "nachos", "omelette", "onion_rings", "oysters", "bibimbap", "pad_thai", "paella", "pancakes", "panna_cotta", "peking_duck", "pho", "pizza", "pork_chop", "poutine", "prime_rib", "bread_pudding", "pulled_pork_sandwich", "ramen", "ravioli", "red_velvet_cake", "risotto", "samosa", "sashimi", "scallops", "seaweed_salad", "shrimp_and_grits", "breakfast_burrito", "spaghetti_bolognese", "spaghetti_carbonara", "spring_rolls", "steak", "strawberry_shortcake", "sushi", "tacos", "takoyaki", "tiramisu", "tuna_tartare" ]
Lunibo/autotrain-csgo_dust2_or_mirage-2555378112
# Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 2555378112 - CO2 Emissions (in grams): 3.5867 ## Validation Metrics - Loss: 0.004 - Accuracy: 1.000 - Precision: 1.000 - Recall: 1.000 - AUC: 1.000 - F1: 1.000
[ "dust2", "mirage" ]
Akshat/DysphagiaCls2label
--- license: openrail tags: - image-classification datasets: - TUFTS face dataset # Environmental Impact - CO2 Emissions (in grams): 1.2933 ## Validation Metrics - Loss: 0.359 - Accuracy: 0.871 - Precision: 0.909 - Recall: 0.909 - AUC: 0.880 - F1: 0.909 ## Training Details : - Pre-trained on [Tuft's Face Dataset](https://github.com/kpvisionlab/Tufts-Face-Database) 10,000 images for 350 epochs using a single NVIDIA A100 GPU
[ "disorder", "normal" ]
sallyanndelucia/resnet_weather_model
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # resnet_weather_model This model was trained from scratch on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 1.7452 - Accuracy: 0.6736 - F1: 0.6655 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 2.3598 | 1.0 | 91 | 2.1983 | 0.5165 | 0.5146 | | 2.0319 | 2.0 | 182 | 1.8708 | 0.6446 | 0.6433 | | 1.7971 | 3.0 | 273 | 1.7452 | 0.6736 | 0.6655 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
[ "dew", "fogsmog", "frost", "glaze", "hail", "lightening", "lightning", "rain", "rainbow", "rime", "sandstorm", "snow" ]
MazenAmria/swin-base-finetuned-cifar100
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-base-finetuned-cifar100 This model is a fine-tuned version of [microsoft/swin-base-patch4-window7-224](https://huggingface.co/microsoft/swin-base-patch4-window7-224) on the cifar100 dataset. It achieves the following results on the evaluation set: - Accuracy: 0.9201 - Loss: 0.3670 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 4e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Accuracy | Validation Loss | |:-------------:|:-----:|:----:|:--------:|:---------------:| | 0.3536 | 1.0 | 781 | 0.9052 | 0.3141 | | 0.3254 | 2.0 | 1562 | 0.9117 | 0.2991 | | 0.0936 | 3.0 | 2343 | 0.9138 | 0.3322 | | 0.1054 | 4.0 | 3124 | 0.9158 | 0.3483 | | 0.0269 | 5.0 | 3905 | 0.9201 | 0.3670 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
[ "apple", "aquarium_fish", "bowl", "boy", "bridge", "bus", "butterfly", "camel", "can", "castle", "caterpillar", "cattle", "baby", "chair", "chimpanzee", "clock", "cloud", "cockroach", "couch", "cra", "crocodile", "cup", "dinosaur", "bear", "dolphin", "elephant", "flatfish", "forest", "fox", "girl", "hamster", "house", "kangaroo", "keyboard", "beaver", "lamp", "lawn_mower", "leopard", "lion", "lizard", "lobster", "man", "maple_tree", "motorcycle", "mountain", "bed", "mouse", "mushroom", "oak_tree", "orange", "orchid", "otter", "palm_tree", "pear", "pickup_truck", "pine_tree", "bee", "plain", "plate", "poppy", "porcupine", "possum", "rabbit", "raccoon", "ray", "road", "rocket", "beetle", "rose", "sea", "seal", "shark", "shrew", "skunk", "skyscraper", "snail", "snake", "spider", "bicycle", "squirrel", "streetcar", "sunflower", "sweet_pepper", "table", "tank", "telephone", "television", "tiger", "tractor", "bottle", "train", "trout", "tulip", "turtle", "wardrobe", "whale", "willow_tree", "wolf", "woman", "worm" ]
MazenAmria/swin-tiny-finetuned-cifar100
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-finetuned-cifar100 This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the cifar100 dataset. It achieves the following results on the evaluation set: - Loss: 0.4223 - Accuracy: 0.8735 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 4e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 20 (with early stopping) ### Training results | Training Loss | Epoch | Step | Accuracy | Validation Loss | |:-------------:|:-----:|:----:|:--------:|:---------------:| | 0.6439 | 1.0 | 781 | 0.8138 | 0.6126 | | 0.6222 | 2.0 | 1562 | 0.8393 | 0.5094 | | 0.2912 | 3.0 | 2343 | 0.861 | 0.4452 | | 0.2234 | 4.0 | 3124 | 0.8679 | 0.4330 | | 0.121 | 5.0 | 3905 | 0.8735 | 0.4223 | | 0.2589 | 6.0 | 4686 | 0.8622 | 0.4775 | | 0.1419 | 7.0 | 5467 | 0.8642 | 0.4900 | | 0.1513 | 8.0 | 6248 | 0.8667 | 0.4956 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
[ "apple", "aquarium_fish", "baby", "bear", "beaver", "bed", "bee", "beetle", "bicycle", "bottle", "bowl", "boy", "bridge", "bus", "butterfly", "camel", "can", "castle", "caterpillar", "cattle", "chair", "chimpanzee", "clock", "cloud", "cockroach", "couch", "cra", "crocodile", "cup", "dinosaur", "dolphin", "elephant", "flatfish", "forest", "fox", "girl", "hamster", "house", "kangaroo", "keyboard", "lamp", "lawn_mower", "leopard", "lion", "lizard", "lobster", "man", "maple_tree", "motorcycle", "mountain", "mouse", "mushroom", "oak_tree", "orange", "orchid", "otter", "palm_tree", "pear", "pickup_truck", "pine_tree", "plain", "plate", "poppy", "porcupine", "possum", "rabbit", "raccoon", "ray", "road", "rocket", "rose", "sea", "seal", "shark", "shrew", "skunk", "skyscraper", "snail", "snake", "spider", "squirrel", "streetcar", "sunflower", "sweet_pepper", "table", "tank", "telephone", "television", "tiger", "tractor", "train", "trout", "tulip", "turtle", "wardrobe", "whale", "willow_tree", "wolf", "woman", "worm" ]
abhinavkk/my_awesome_food_model
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my_awesome_food_model This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the food101 dataset. It achieves the following results on the evaluation set: - Loss: 1.2840 - Accuracy: 0.914 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 7e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 2.3694 | 0.99 | 62 | 2.1818 | 0.831 | | 1.4708 | 1.99 | 124 | 1.4502 | 0.907 | | 1.2797 | 2.99 | 186 | 1.2840 | 0.914 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
[ "apple_pie", "baby_back_ribs", "bruschetta", "waffles", "caesar_salad", "cannoli", "caprese_salad", "carrot_cake", "ceviche", "cheesecake", "cheese_plate", "chicken_curry", "chicken_quesadilla", "baklava", "chicken_wings", "chocolate_cake", "chocolate_mousse", "churros", "clam_chowder", "club_sandwich", "crab_cakes", "creme_brulee", "croque_madame", "cup_cakes", "beef_carpaccio", "deviled_eggs", "donuts", "dumplings", "edamame", "eggs_benedict", "escargots", "falafel", "filet_mignon", "fish_and_chips", "foie_gras", "beef_tartare", "french_fries", "french_onion_soup", "french_toast", "fried_calamari", "fried_rice", "frozen_yogurt", "garlic_bread", "gnocchi", "greek_salad", "grilled_cheese_sandwich", "beet_salad", "grilled_salmon", "guacamole", "gyoza", "hamburger", "hot_and_sour_soup", "hot_dog", "huevos_rancheros", "hummus", "ice_cream", "lasagna", "beignets", "lobster_bisque", "lobster_roll_sandwich", "macaroni_and_cheese", "macarons", "miso_soup", "mussels", "nachos", "omelette", "onion_rings", "oysters", "bibimbap", "pad_thai", "paella", "pancakes", "panna_cotta", "peking_duck", "pho", "pizza", "pork_chop", "poutine", "prime_rib", "bread_pudding", "pulled_pork_sandwich", "ramen", "ravioli", "red_velvet_cake", "risotto", "samosa", "sashimi", "scallops", "seaweed_salad", "shrimp_and_grits", "breakfast_burrito", "spaghetti_bolognese", "spaghetti_carbonara", "spring_rolls", "steak", "strawberry_shortcake", "sushi", "tacos", "takoyaki", "tiramisu", "tuna_tartare" ]
platzi/platzi-vit-model-julio-alvarez
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # platzi-vit-model-julio-alvarez This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.0566 - Accuracy: 0.9774 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 6 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1597 | 3.85 | 500 | 0.0566 | 0.9774 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
[ "angular_leaf_spot", "bean_rust", "healthy" ]
abhinavkk/cifar10_model
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # cifar10_model This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 7e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 300 ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
[ "airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck" ]
jypasona/convnext-tiny-224-finetuned-eurosat-albumentations
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # convnext-tiny-224-finetuned-eurosat-albumentations This model is a fine-tuned version of [facebook/convnext-tiny-224](https://huggingface.co/facebook/convnext-tiny-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.0573 - Accuracy: 0.9848 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1564 | 1.0 | 190 | 0.1283 | 0.9737 | | 0.0677 | 2.0 | 380 | 0.0697 | 0.9837 | | 0.0494 | 3.0 | 570 | 0.0573 | 0.9848 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
[ "annualcrop", "forest", "herbaceousvegetation", "highway", "industrial", "pasture", "permanentcrop", "residential", "river", "sealake" ]
toshio19910306/convnext-tiny-224-finetuned-eurosat-albumentations
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # convnext-tiny-224-finetuned-eurosat-albumentations This model is a fine-tuned version of [facebook/convnext-tiny-224](https://huggingface.co/facebook/convnext-tiny-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.0671 - Accuracy: 0.9815 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1452 | 1.0 | 190 | 0.1335 | 0.97 | | 0.0683 | 2.0 | 380 | 0.0825 | 0.9763 | | 0.0584 | 3.0 | 570 | 0.0671 | 0.9815 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
[ "annualcrop", "forest", "herbaceousvegetation", "highway", "industrial", "pasture", "permanentcrop", "residential", "river", "sealake" ]
pcernuta/klobasa-ni-klobasa
# convnext-tiny-224-klobasaniklobasa This model is a fine-tuned version of [facebook/convnext-tiny-224](https://huggingface.co/facebook/convnext-tiny-224) on a small dataset of klobasa images scraped from the internets. It achieves the following results on the evaluation set: - Loss: 0.4401 - Accuracy: 0.8958 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 4e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.7062 | 1.0 | 48 | 0.7116 | 0.8438 | | 0.4831 | 2.0 | 96 | 0.5968 | 0.8333 | | 0.2429 | 3.0 | 144 | 0.5384 | 0.8542 | | 0.2292 | 4.0 | 192 | 0.4995 | 0.8438 | | 0.1549 | 5.0 | 240 | 0.4508 | 0.8854 | | 0.1682 | 6.0 | 288 | 0.4401 | 0.8958 | | 0.1736 | 7.0 | 336 | 0.4440 | 0.8958 | | 0.0633 | 8.0 | 384 | 0.4406 | 0.8958 | | 0.0689 | 9.0 | 432 | 0.4371 | 0.8958 | | 0.0558 | 10.0 | 480 | 0.4334 | 0.8958 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
[ "hrenovka", "kranjska-klobasa", "krvavica", "mortadela", "pecenica", "prsut" ]
ivensamdh/autotrain-age3-2658279907
# Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 2658279907 - CO2 Emissions (in grams): 5.0658 ## Validation Metrics - Loss: 0.895 - Accuracy: 0.770 - Macro F1: 0.768 - Micro F1: 0.770 - Weighted F1: 0.768 - Macro Precision: 0.773 - Micro Precision: 0.770 - Weighted Precision: 0.773 - Macro Recall: 0.770 - Micro Recall: 0.770 - Weighted Recall: 0.770
[ "0-19", "20-29", "30-39", "40-69", "70+" ]
simlaharma/vit-base-beans
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-beans This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.1328 - Accuracy: 0.9699 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 1337 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results | Training Loss | Epoch | Step | Accuracy | Validation Loss | |:-------------:|:-----:|:----:|:--------:|:---------------:| | 0.49 | 1.0 | 65 | 0.9624 | 0.4050 | | 0.2769 | 2.0 | 130 | 0.9850 | 0.1862 | | 0.1441 | 3.0 | 195 | 0.9774 | 0.1554 | | 0.1661 | 4.0 | 260 | 0.9774 | 0.1333 | | 0.1754 | 5.0 | 325 | 0.9699 | 0.1328 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.1+cu117 - Datasets 2.8.0 - Tokenizers 0.13.2
[ "angular_leaf_spot", "bean_rust", "healthy" ]
EdBianchi/vit-fire-detection
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-fire-detection This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0126 - Precision: 0.9960 - Recall: 0.9960 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 100 - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:| | 0.1018 | 1.0 | 190 | 0.0375 | 0.9934 | 0.9934 | | 0.0484 | 2.0 | 380 | 0.0167 | 0.9961 | 0.9960 | | 0.0357 | 3.0 | 570 | 0.0253 | 0.9948 | 0.9947 | | 0.0133 | 4.0 | 760 | 0.0198 | 0.9961 | 0.9960 | | 0.012 | 5.0 | 950 | 0.0203 | 0.9947 | 0.9947 | | 0.0139 | 6.0 | 1140 | 0.0204 | 0.9947 | 0.9947 | | 0.0076 | 7.0 | 1330 | 0.0175 | 0.9961 | 0.9960 | | 0.0098 | 8.0 | 1520 | 0.0115 | 0.9974 | 0.9974 | | 0.0062 | 9.0 | 1710 | 0.0133 | 0.9960 | 0.9960 | | 0.0012 | 10.0 | 1900 | 0.0126 | 0.9960 | 0.9960 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.14.0.dev20221111 - Datasets 2.8.0 - Tokenizers 0.12.1
[ "fire", "normal", "smoke" ]
clp/leanne-or-lauren-v2
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # leanne-or-lauren-v2 This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
[ "lauren", "leanne" ]
MazenAmria/swin-small-finetuned-cifar100
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-small-finetuned-cifar100 This model is a fine-tuned version of [microsoft/swin-small-patch4-window7-224](https://huggingface.co/microsoft/swin-small-patch4-window7-224) on the cifar100 dataset. It achieves the following results on the evaluation set: - Loss: 0.6281 - Accuracy: 0.8938 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 4e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 0.72 | 1.0 | 781 | 0.6691 | 0.8077 | | 0.6944 | 2.0 | 1562 | 0.4797 | 0.8495 | | 0.2794 | 3.0 | 2343 | 0.4338 | 0.869 | | 0.2569 | 4.0 | 3124 | 0.4263 | 0.879 | | 0.1417 | 5.0 | 3905 | 0.4385 | 0.8819 | | 0.0961 | 6.0 | 4686 | 0.4720 | 0.8854 | | 0.0584 | 7.0 | 5467 | 0.4941 | 0.885 | | 0.0351 | 8.0 | 6248 | 0.5253 | 0.885 | | 0.0107 | 9.0 | 7029 | 0.5598 | 0.8887 | | 0.0118 | 10.0 | 7810 | 0.5998 | 0.8858 | | 0.0097 | 11.0 | 8591 | 0.5957 | 0.8941 | | 0.0044 | 12.0 | 9372 | 0.6237 | 0.8912 | | 0.0013 | 13.0 | 10153 | 0.6286 | 0.8929 | | 0.0102 | 14.0 | 10934 | 0.6281 | 0.8938 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
[ "apple", "aquarium_fish", "bowl", "boy", "bridge", "bus", "butterfly", "camel", "can", "castle", "caterpillar", "cattle", "baby", "chair", "chimpanzee", "clock", "cloud", "cockroach", "couch", "cra", "crocodile", "cup", "dinosaur", "bear", "dolphin", "elephant", "flatfish", "forest", "fox", "girl", "hamster", "house", "kangaroo", "keyboard", "beaver", "lamp", "lawn_mower", "leopard", "lion", "lizard", "lobster", "man", "maple_tree", "motorcycle", "mountain", "bed", "mouse", "mushroom", "oak_tree", "orange", "orchid", "otter", "palm_tree", "pear", "pickup_truck", "pine_tree", "bee", "plain", "plate", "poppy", "porcupine", "possum", "rabbit", "raccoon", "ray", "road", "rocket", "beetle", "rose", "sea", "seal", "shark", "shrew", "skunk", "skyscraper", "snail", "snake", "spider", "bicycle", "squirrel", "streetcar", "sunflower", "sweet_pepper", "table", "tank", "telephone", "television", "tiger", "tractor", "bottle", "train", "trout", "tulip", "turtle", "wardrobe", "whale", "willow_tree", "wolf", "woman", "worm" ]
Ole8/autotrain-candice-2590780077
# Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 2590780077 - CO2 Emissions (in grams): 5.7673 ## Validation Metrics - Loss: 0.018 - Accuracy: 0.998 - Macro F1: 0.998 - Micro F1: 0.998 - Weighted F1: 0.998 - Macro Precision: 0.999 - Micro Precision: 0.998 - Weighted Precision: 0.998 - Macro Recall: 0.998 - Micro Recall: 0.998 - Weighted Recall: 0.998
[ "a", "b", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "c", "u", "v", "w", "x", "y", "z", "d", "e", "f", "g", "h", "i", "j" ]
MaxP/vit-base-riego
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-riego This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 1.2998 - F1: 0.3729 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 16 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.1696 | 0.79 | 100 | 1.1385 | 0.352 | | 0.08 | 1.59 | 200 | 0.9071 | 0.3774 | | 0.0928 | 2.38 | 300 | 1.1181 | 0.3454 | | 0.0189 | 3.17 | 400 | 0.8262 | 0.3425 | | 0.0728 | 3.97 | 500 | 0.9647 | 0.3747 | | 0.0756 | 4.76 | 600 | 0.6097 | 0.4776 | | 0.0018 | 5.56 | 700 | 1.3900 | 0.3652 | | 0.002 | 6.35 | 800 | 0.7498 | 0.4606 | | 0.0304 | 7.14 | 900 | 1.4367 | 0.3666 | | 0.0024 | 7.94 | 1000 | 1.5714 | 0.3041 | | 0.0463 | 8.73 | 1100 | 0.8038 | 0.4016 | | 0.0014 | 9.52 | 1200 | 0.7175 | 0.4795 | | 0.0015 | 10.32 | 1300 | 1.0347 | 0.3959 | | 0.0009 | 11.11 | 1400 | 1.3881 | 0.3670 | | 0.0131 | 11.9 | 1500 | 1.0780 | 0.4044 | | 0.0007 | 12.7 | 1600 | 0.9834 | 0.4255 | | 0.0011 | 13.49 | 1700 | 1.0753 | 0.4033 | | 0.0007 | 14.29 | 1800 | 1.1514 | 0.3989 | | 0.0007 | 15.08 | 1900 | 1.2373 | 0.3769 | | 0.0007 | 15.87 | 2000 | 1.2998 | 0.3729 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1+cu116 - 
Datasets 2.10.1 - Tokenizers 0.13.2
[ "imagenes_no_riego", "imagenes_riego" ]
saltacc/anime-ai-detect
# Anime AI Art Detect A BEiT classifier to see if anime art was made by an AI or a human. ### Disclaimer Like most AI models, this classifier is not 100% accurate. Please do not take the results of this model as fact. The best version had a 96% accuracy distinguishing aibooru and the images from the imageboard sites. However, the success you have with this model will vary based on the images you are trying to classify. Here are some biases I have noticed from my testing: - Images on aibooru, the site where the AI images were taken from, were high quality AI generations. Low quality AI generations have a higher chance of being misclassified - Textual inversions and hypernetworks increase the chance of misclassification ### Training This model was trained from microsoft/beit-base-patch16-224 for one epoch on 11 thousand images from imageboard sites, and 11 thousand images from aibooru. You can view the wandb run [here](https://wandb.ai/saltacc/huggingface/runs/2mp30x7j?workspace=user-saltacc). ### Use Case I don't intend for this model to be more accurate than humans for detecting AI art. I think the best use cases for this model would be for cases where misclassification isn't a big deal, such as removing AI art from a training dataset.
[ "ai", "human" ]
maixbach/swin-tiny-patch4-window7-224-finetuned-trash_classification
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-trash_classification This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.3372 - Accuracy: 0.8827 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.4991 | 1.0 | 22 | 0.5482 | 0.7911 | | 0.4008 | 2.0 | 44 | 0.5193 | 0.7954 | | 0.3659 | 3.0 | 66 | 0.4464 | 0.8398 | | 0.372 | 4.0 | 88 | 0.4384 | 0.8398 | | 0.3388 | 5.0 | 110 | 0.4281 | 0.8455 | | 0.2654 | 6.0 | 132 | 0.3618 | 0.8712 | | 0.2326 | 7.0 | 154 | 0.3550 | 0.8755 | | 0.2354 | 8.0 | 176 | 0.3401 | 0.8798 | | 0.1774 | 9.0 | 198 | 0.3372 | 0.8827 | | 0.1849 | 10.0 | 220 | 0.3380 | 0.8827 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
[ "g_m", "organic", "other", "paper", "plastic" ]
molsen/autotrain-genderage-2709480568
# Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 2709480568 - CO2 Emissions (in grams): 8.2410 ## Validation Metrics - Loss: 1.277 - Accuracy: 0.560 - Macro F1: 0.560 - Micro F1: 0.560 - Weighted F1: 0.560 - Macro Precision: 0.570 - Micro Precision: 0.560 - Weighted Precision: 0.570 - Macro Recall: 0.560 - Micro Recall: 0.560 - Weighted Recall: 0.560
[ "female_0-19", "female_20-29", "female_30-39", "female_40-69", "female_70+", "male_0-19", "male_20-29", "male_30-39", "male_40-69", "male_70+" ]
ManuD/vit_for_dfl
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit_for_dfl This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the dfl dataset. It achieves the following results on the evaluation set: - Loss: 0.1771 - F1: 0.2453 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 8 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.0836 | 1.0 | 358 | 0.1841 | 0.2453 | | 0.207 | 2.0 | 716 | 0.1835 | 0.2453 | | 0.2325 | 3.0 | 1074 | 0.1771 | 0.2453 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
[ "nothing", "challenge", "throwin", "play" ]
Aalaa/Fine_tuned_Vit_trash_classification
# Vision Transformer (base-sized model) Vision Transformer (ViT) model pre-trained on ImageNet-21k (14 million images, 21,843 classes) at resolution 224x224, and fine-tuned on ImageNet 2012 (1 million images, 1,000 classes) at resolution 224x224. It was introduced in the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Dosovitskiy et al. and first released in [this repository](https://github.com/google-research/vision_transformer). However, the weights were converted from the [timm repository](https://github.com/rwightman/pytorch-image-models) by Ross Wightman, who already converted the weights from JAX to PyTorch. Credits go to him. Disclaimer: The team releasing ViT did not write a model card for this model so this model card has been written by the Hugging Face team. ## Dataset The dataset used consist of spans six classes: glass, paper, cardboard, plastic, metal, and trash. Currently, the dataset consists of 2527 images: * 501 glass * 594 paper * 403 cardboard * 482 plastic * 410 metal * 137 trash ## Fine_tuned Notebook This notebook outlines the steps from preparing the data in the Vit-acceptable format to training the model [Notebook](https://colab.research.google.com/drive/1RbmRPJ9bFLA_qK9RGgPoHZRnUTy_md5O?usp=sharing) ### How to use Just copy this lines below: ```python from transformers import AutoFeatureExtractor, AutoModelForImageClassification from PIL import Image import requests url = 'https://www.estal.com/FitxersWeb/331958/estal_carroussel_wg_spirits_5.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = AutoFeatureExtractor.from_pretrained("Aalaa/Fine_tuned_Vit_trash_classification") model = AutoModelForImageClassification.from_pretrained("Aalaa/Fine_tuned_Vit_trash_classification") inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) logits = outputs.logits predicted_class_idx = logits.argmax(-1).item() 
print("Predicted class:", model.config.id2label[predicted_class_idx]) ``` For more code examples, we refer to the [documentation](https://huggingface.co/transformers/model_doc/vit.html#).
[ "plastic", "glass", "trash", "cardboard", ".ds_store", "paper", "metal" ]
ongp/Pacc
# Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 2718280758 - CO2 Emissions (in grams): 4.8390 ## Validation Metrics - Loss: 0.663 - Accuracy: 0.708 - Macro F1: 0.698 - Micro F1: 0.708 - Weighted F1: 0.712 - Macro Precision: 0.703 - Micro Precision: 0.708 - Weighted Precision: 0.717 - Macro Recall: 0.695 - Micro Recall: 0.708 - Weighted Recall: 0.708
[ "bbw", "chubby", "ssbbw", "thin", "ussbbw" ]
ongp/70btclassification
# Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 2723080856 - CO2 Emissions (in grams): 0.0113 ## Validation Metrics - Loss: 0.877 - Accuracy: 0.708 - Macro F1: 0.695 - Micro F1: 0.708 - Weighted F1: 0.704 - Macro Precision: 0.703 - Micro Precision: 0.708 - Weighted Precision: 0.711 - Macro Recall: 0.699 - Micro Recall: 0.708 - Weighted Recall: 0.708
[ "bbw", "chubby", "ssbbw", "thin", "ussbbw" ]
julenalvaro/platzi_vit_model_julenalvaro
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # platzi_vit_model_julenalvaro This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.0314 - Accuracy: 0.9925 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1352 | 3.85 | 500 | 0.0314 | 0.9925 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
[ "angular_leaf_spot", "bean_rust", "healthy" ]
julenalvaro/PerrosVSgatos_openai_clip-vit-large-patch14
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # cat_vs_dogs_vit_model This model is a fine-tuned version of [openai/clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.6518 - Accuracy: 0.6095 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.771 | 0.5 | 500 | 0.6933 | 0.4995 | | 0.6967 | 1.0 | 1000 | 0.6882 | 0.556 | | 0.695 | 1.5 | 1500 | 0.6728 | 0.581 | | 0.6733 | 2.0 | 2000 | 0.6950 | 0.5915 | | 0.6768 | 2.5 | 2500 | 0.6694 | 0.5855 | | 0.6639 | 3.0 | 3000 | 0.6829 | 0.5795 | | 0.652 | 3.5 | 3500 | 0.6637 | 0.5925 | | 0.642 | 4.0 | 4000 | 0.6518 | 0.6095 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
[ "cat", "dog" ]
julenalvaro/Perros-VS-gatos-con-vit-base-patch16-224-in21k
# vit-base-patch16-224-in21k This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.1026 - Accuracy: 0.982 ## Model description This model is a fine-tuned version of google/vit-base-patch16-224-in21k which discriminates cats from dogs. ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.177 | 0.5 | 500 | 0.2100 | 0.9435 | | 0.1515 | 1.0 | 1000 | 0.0710 | 0.975 | | 0.0443 | 1.5 | 1500 | 0.2043 | 0.9535 | | 0.0625 | 2.0 | 2000 | 0.0898 | 0.9745 | | 0.0181 | 2.5 | 2500 | 0.0961 | 0.9805 | | 0.0091 | 3.0 | 3000 | 0.1049 | 0.982 | | 0.0016 | 3.5 | 3500 | 0.1066 | 0.981 | | 0.0015 | 4.0 | 4000 | 0.1026 | 0.982 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
[ "cat", "dog" ]
robosapiens/room-styles
model-index: - name: room-styles results: - task: name: Image Classification type: image-classification metrics: - name: Accuracy type: accuracy value: 0.4196428656578064
[ "coastal room style", "industrial room style", "modern room style", "rustic room style", "traditional room style" ]
simlaharma/vit-base-cifar10
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-cifar10 This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the cifar10 dataset. It achieves the following results on the evaluation set: - Loss: 2.3302 - Accuracy: 0.106 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 2.3324 | 1.0 | 664 | 2.3352 | 0.0967 | | 2.3489 | 2.0 | 1328 | 2.3288 | 0.1049 | | 2.4899 | 3.0 | 1992 | 2.4473 | 0.0989 | | 2.479 | 4.0 | 2656 | 2.4894 | 0.1 | | 2.4179 | 5.0 | 3320 | 2.4404 | 0.0947 | | 2.3881 | 6.0 | 3984 | 2.3931 | 0.102 | | 2.3597 | 7.0 | 4648 | 2.3744 | 0.0967 | | 2.3721 | 8.0 | 5312 | 2.3667 | 0.0935 | | 2.3456 | 9.0 | 5976 | 2.3495 | 0.1036 | | 2.3361 | 10.0 | 6640 | 2.3473 | 0.1025 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.1+cu117 - Datasets 2.8.0 - Tokenizers 0.13.2
[ "airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck" ]
DunnBC22/vit-base-patch16-224-in21k_Bart_or_Homer
# vit-base-patch16-224-in21k_Bart_or_Homer This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k). It achieves the following results on the evaluation set: - Loss: 0.0636 - Accuracy: 0.9863 - F1: 0.9841 - Recall: 1.0 - Precision: 0.9688 ## Model description This is a binary classification model to distinguish between Bart and Homer Simpson. For more information on how it was created, check out the following link:https://github.com/DunnBC22/Vision_Audio_and_Multimodal_Projects/blob/main/Computer%20Vision/Image%20Classification/Binary%20Classification/Bart%20vs%20Homer/Bart_vs_Homer_Image_clf_ViT.ipynb ## Intended uses & limitations This model is intended to demonstrate my ability to solve a complex problem using technology. ## Training and evaluation data Dataset Source: https://www.kaggle.com/datasets/williamu32/dataset-bart-or-homer _Sample Images From Dataset:_ ![Sample Images](https://github.com/DunnBC22/Vision_Audio_and_Multimodal_Projects/raw/main/Computer%20Vision/Image%20Classification/Binary%20Classification/Bart%20vs%20Homer/Images/Sample%20Images.png) ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Recall | Precision | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:------:|:---------:| | 0.6996 | 1.0 | 13 | 0.1327 | 0.9726 | 0.9688 | 1.0 | 0.9394 | | 0.6996 | 2.0 | 26 | 0.0636 | 0.9863 | 0.9841 | 1.0 | 0.9688 | | 0.6996 | 3.0 | 39 | 0.1420 | 0.9452 | 0.9394 | 1.0 | 0.8857 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1 - Datasets 2.4.0 - Tokenizers 0.12.1
[ "bart", "homer" ]
platzi/platzi-vit-model-omar-espejel
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # platzi-vit-model-omar-espejel This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.0367 - Accuracy: 0.9850 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.149 | 3.85 | 500 | 0.0367 | 0.9850 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
[ "angular_leaf_spot", "bean_rust", "healthy" ]
DunnBC22/vit-base-patch16-224-in21k_GI_diagnosis
# vit-base-patch16-224-in21k_GI_diagnosis This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k). It achieves the following results on the evaluation set: - Loss: 0.2538 - Accuracy: 0.9375 - Weighted f1: 0.9365 - Micro f1: 0.9375 - Macro f1: 0.9365 - Weighted recall: 0.9375 - Micro recall: 0.9375 - Macro recall: 0.9375 - Weighted precision: 0.9455 - Micro precision: 0.9375 - Macro precision: 0.9455 ## Model description This is a multiclass image classification model of GI diagnosis'. For more information on how it was created, check out the following link: https://github.com/DunnBC22/Vision_Audio_and_Multimodal_Projects/blob/main/Computer%20Vision/Image%20Classification/Multiclass%20Classification/Diagnoses%20from%20Colonoscopy%20Images/diagnosis_from_colonoscopy_image_ViT.ipynb ## Intended uses & limitations This model is intended to demonstrate my ability to solve a complex problem using technology. ## Training and evaluation data Dataset Source: https://www.kaggle.com/datasets/francismon/curated-colon-dataset-for-deep-learning ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Weighted f1 | Micro f1 | Macro f1 | Weighted recall | Micro recall | Macro recall | Weighted precision | Micro precision | Macro precision | |:-------------:|:-----:|:----:|:---------------:|:--------:|:-----------:|:--------:|:--------:|:---------------:|:------------:|:------------:|:------------------:|:---------------:|:---------------:| | 1.3805 | 1.0 | 200 | 0.5006 | 0.8638 | 0.8531 | 0.8638 | 0.8531 | 0.8638 | 0.8638 | 0.8638 | 0.9111 | 0.8638 | 0.9111 | | 1.3805 | 2.0 | 400 | 0.2538 | 
0.9375 | 0.9365 | 0.9375 | 0.9365 | 0.9375 | 0.9375 | 0.9375 | 0.9455 | 0.9375 | 0.9455 | | 0.0628 | 3.0 | 600 | 0.5797 | 0.8812 | 0.8740 | 0.8812 | 0.8740 | 0.8812 | 0.8812 | 0.8813 | 0.9157 | 0.8812 | 0.9157 | ### Framework versions - Transformers 4.22.2 - Pytorch 1.12.1 - Datasets 2.5.2 - Tokenizers 0.12.1
[ "0_normal", "1_ulcerative_colitis", "2_polyps", "3_esophagitis" ]
DunnBC22/vit-base-patch16-224-in21k_Simpsons_Family_Members
# vit-base-patch16-224-in21k_Simpsons_Family_Members This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k). It achieves the following results on the evaluation set: - Loss: 0.2431 - Accuracy: 0.9530 - F1 - Weighted: 0.9522 - Micro: 0.9530 - Macro: 0.9521 - Recall - Weighted: 0.9530 - Micro: 0.9530 - Macro: 0.9531 - Precision - Weighted: 0.9605 - Micro: 0.9530 - Macro: 0.9601 ## Model description This is a multiclass image classification model of members of The Simpsons family. For more information on how it was created, check out the following link: https://github.com/DunnBC22/Vision_Audio_and_Multimodal_Projects/blob/main/Computer%20Vision/Image%20Classification/Multiclass%20Classification/Simpsons%20Family%20Images/Simpsons_family_with_hf_ViT.ipynb ## Intended uses & limitations This model is intended to demonstrate my ability to solve a complex problem using technology. ## Training and evaluation data Dataset Source: https://www.kaggle.com/datasets/williamu32/dataset-bart-or-homer _Sample Images From Dataset:_ ![Sample Images](https://github.com/DunnBC22/Vision_Audio_and_Multimodal_Projects/raw/main/Computer%20Vision/Image%20Classification/Multiclass%20Classification/Simpsons%20Family%20Images/Images/Sample%20Images.png) ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Weighted F1 | Micro F1 | Macro F1 | Weighted Recall | Micro Recall | Macro Recall | Weighted Precision | Micro Precision | Macro Precision | 
|:-------------:|:-----:|:----:|:---------------:|:--------:|:-----------:|:--------:|:--------:|:---------------:|:------------:|:------------:|:------------------:|:---------------:|:---------------:| | 1.5773 | 1.0 | 373 | 1.0482 | 0.7772 | 0.7263 | 0.7772 | 0.7261 | 0.7772 | 0.7772 | 0.7778 | 0.8933 | 0.7772 | 0.8922 | | 0.1598 | 2.0 | 746 | 0.3902 | 0.9059 | 0.9028 | 0.9059 | 0.9026 | 0.9059 | 0.9059 | 0.9060 | 0.9224 | 0.9059 | 0.9219 | | 0.027 | 3.0 | 1119 | 0.2431 | 0.9530 | 0.9522 | 0.9530 | 0.9521 | 0.9530 | 0.9530 | 0.9531 | 0.9605 | 0.9530 | 0.9601 | ### Framework versions - Transformers 4.22.2 - Pytorch 1.12.1 - Datasets 2.5.2 - Tokenizers 0.12.1
[ "bart_simpson", "homer_simpson", "lisa_simpson", "maggie_simpson", "marge_simpson" ]
zlgao/swin-tiny-patch4-window7-224-finetuned-fluro_cls
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-fluro_cls This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0000 - Accuracy: 1.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 30 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 0.67 | 1 | 0.7112 | 0.5238 | | No log | 1.67 | 2 | 0.5591 | 0.8571 | | 0.811 | 2.67 | 3 | 0.3781 | 0.9524 | | 0.811 | 3.67 | 4 | 0.1995 | 1.0 | | 0.811 | 4.67 | 5 | 0.1215 | 1.0 | | 0.3531 | 5.67 | 6 | 0.0578 | 1.0 | | 0.3531 | 6.67 | 7 | 0.0195 | 1.0 | | 0.3531 | 7.67 | 8 | 0.0072 | 1.0 | | 0.0618 | 8.67 | 9 | 0.0030 | 1.0 | | 0.0618 | 9.67 | 10 | 0.0012 | 1.0 | | 0.0618 | 10.67 | 11 | 0.0005 | 1.0 | | 0.0079 | 11.67 | 12 | 0.0003 | 1.0 | | 0.0079 | 12.67 | 13 | 0.0001 | 1.0 | | 0.0079 | 13.67 | 14 | 0.0001 | 1.0 | | 0.0051 | 14.67 | 15 | 0.0001 | 1.0 | | 0.0051 | 15.67 | 16 | 0.0000 | 1.0 | | 0.0051 | 16.67 | 17 | 0.0000 | 1.0 | | 0.0017 | 17.67 | 18 | 0.0000 | 1.0 | | 0.0017 | 18.67 | 19 | 0.0000 | 1.0 | | 0.0017 | 19.67 | 20 | 0.0000 | 1.0 | | 0.0004 | 20.67 | 21 | 0.0000 | 1.0 | | 0.0004 | 21.67 
| 22 | 0.0000 | 1.0 | | 0.0004 | 22.67 | 23 | 0.0000 | 1.0 | | 0.0022 | 23.67 | 24 | 0.0000 | 1.0 | | 0.0022 | 24.67 | 25 | 0.0000 | 1.0 | | 0.0022 | 25.67 | 26 | 0.0000 | 1.0 | | 0.001 | 26.67 | 27 | 0.0000 | 1.0 | | 0.001 | 27.67 | 28 | 0.0000 | 1.0 | | 0.001 | 28.67 | 29 | 0.0000 | 1.0 | | 0.0013 | 29.67 | 30 | 0.0000 | 1.0 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.10.2+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
[ "mcf7", "mda231" ]
gneuert/swin-tiny-patch4-window7-224-finetuned-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-eurosat This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the cifar10 dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
[ "airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck" ]
DunnBC22/vit-base-patch16-224-in21k_lung_and_colon_cancer
# vit-base-patch16-224-in21k_lung_and_colon_cancer This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k). It achieves the following results on the evaluation set: - Loss: 0.0016 - Accuracy: 0.9994 - F1 - Weighted: 0.9994 - Micro: 0.9994 - Macro: 0.9994 - Recall - Weighted: 0.9994 - Micro: 0.9994 - Macro: 0.9994 - Precision - Weighted: 0.9994 - Micro: 0.9994 - Macro: 0.9994 ## Model description This is a multiclass image classification model of lung and colon cancers. For more information on how it was created, check out the following link: https://github.com/DunnBC22/Vision_Audio_and_Multimodal_Projects/blob/main/Computer%20Vision/Image%20Classification/Multiclass%20Classification/Lung%20%26%20Colon%20Cancer/Lung_and_colon_cancer_ViT.ipynb ## Intended uses & limitations This model is intended to demonstrate my ability to solve a complex problem using technology. ## Training and evaluation data Dataset Source: https://www.kaggle.com/datasets/andrewmvd/lung-and-colon-cancer-histopathological-images _Sample Images From Dataset:_ ![Sample Images](https://github.com/DunnBC22/Vision_Audio_and_Multimodal_Projects/raw/main/Computer%20Vision/Image%20Classification/Multiclass%20Classification/Lung%20%26%20Colon%20Cancer/Images/Sample%20Images.png) ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Weighted F1 | Micro F1 | Macro F1 | Weighted Recall | Micro Recall | Macro Recall | Weighted Precision | Micro Precision | Macro Precision | 
|:-------------:|:-----:|:----:|:---------------:|:--------:|:-----------:|:--------:|:--------:|:---------------:|:------------:|:------------:|:------------------:|:---------------:|:---------------:| | 0.0574 | 1.0 | 1250 | 0.0410 | 0.9864 | 0.9864 | 0.9864 | 0.9865 | 0.9864 | 0.9864 | 0.9864 | 0.9872 | 0.9864 | 0.9875 | | 0.0031 | 2.0 | 2500 | 0.0105 | 0.9972 | 0.9972 | 0.9972 | 0.9972 | 0.9972 | 0.9972 | 0.9973 | 0.9972 | 0.9972 | 0.9972 | | 0.0007 | 3.0 | 3750 | 0.0016 | 0.9994 | 0.9994 | 0.9994 | 0.9994 | 0.9994 | 0.9994 | 0.9994 | 0.9994 | 0.9994 | 0.9994 | ### Framework versions - Transformers 4.22.2 - Pytorch 1.12.1 - Datasets 2.5.2 - Tokenizers 0.12.1
[ "colon_aca", "colon_n", "lung_aca", "lung_n", "lung_scc" ]
gualti/vit-base-patch16-224-finetuned-flower
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-patch16-224-finetuned-flower This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the imagefolder dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results ### Framework versions - Transformers 4.24.0 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
[ "daisy", "dandelion", "roses", "sunflowers", "tulips" ]
DunnBC22/vit-base-patch16-224-in21k_car_or_motorcycle
# vit-base-patch16-224-in21k_car_or_motorcycle This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.0301 - Accuracy: 0.9938 - F1: 0.9939 - Recall: 0.9927 - Precision: 0.9951 ## Model description This is a binary classification model to distinguish between images of cars and images of motorcycles. For more information on how it was created, check out the following link: https://github.com/DunnBC22/Vision_Audio_and_Multimodal_Projects/blob/main/Computer%20Vision/Image%20Classification/Binary%20Classification/Car%20or%20Motorcycle/Car_or_Motorcycle_ViT.ipynb ## Intended uses & limitations This model is intended to demonstrate my ability to solve a complex problem using technology. ## Training and evaluation data Dataset Source: https://www.kaggle.com/datasets/utkarshsaxenadn/car-vs-bike-classification-dataset _Sample Images From Dataset:_ ![Sample Images](https://github.com/DunnBC22/Vision_Audio_and_Multimodal_Projects/raw/main/Computer%20Vision/Image%20Classification/Binary%20Classification/Car%20or%20Motorcycle/Images/Sample%20Images.png) ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Recall | Precision | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:------:|:---------:| | 0.6908 | 1.0 | 200 | 0.0372 | 0.99 | 0.9902 | 0.9902 | 0.9902 | | 0.6908 | 2.0 | 400 | 0.0301 | 0.9938 | 0.9939 | 0.9927 | 0.9951 | ### Framework versions - Transformers 4.22.2 - Pytorch 1.12.1 - Datasets 2.5.2 - Tokenizers 0.12.1
[ "bike", "car" ]
polejowska/swin-tiny-patch4-window7-224-lcbsi-wbc-new
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-lcbsi-wbc-new This model is a fine-tuned version of [polejowska/swin-tiny-patch4-window7-224-lcbsi-wbc](https://huggingface.co/polejowska/swin-tiny-patch4-window7-224-lcbsi-wbc) on the WBC dataset. It achieves the following results on the evaluation set: - Loss: 0.0457 - Accuracy: 0.992 - Precision: 0.9920 - Recall: 0.992 - F1: 0.9920 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002562 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:| | 0.0936 | 0.98 | 27 | 0.0724 | 0.984 | 0.9841 | 0.984 | 0.9840 | | 0.0276 | 1.98 | 54 | 0.0768 | 0.984 | 0.9841 | 0.984 | 0.9839 | | 0.0133 | 2.98 | 81 | 0.0457 | 0.992 | 0.9920 | 0.992 | 0.9920 | ### Framework versions - Transformers 4.25.1 - Pytorch 2.0.0.dev20230107 - Datasets 2.8.0 - Tokenizers 0.13.2
[ "basophil", "eosinophil", "lymphocyte", "monocyte", "neutrophil" ]
Celal11/swin-tiny-patch4-window7-224-finetuned-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-eurosat This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the image_folder dataset. It achieves the following results on the evaluation set: - Loss: 1.0639 - Accuracy: 0.5960 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.4361 | 1.0 | 101 | 1.2522 | 0.5284 | | 1.3156 | 2.0 | 202 | 1.1060 | 0.5719 | | 1.2426 | 3.0 | 303 | 1.0639 | 0.5960 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
[ "angry", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
DunnBC22/vit-base-patch16-224-in21k_covid_19_ct_scans
# vit-base-patch16-224-in21k_covid_19_ct_scans This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k). It achieves the following results on the evaluation set: - Loss: 0.1727 - Accuracy: 0.94 - F1: 0.9379 - Recall: 0.8947 - Precision: 0.9855 ## Model description This is a binary classification model to distinguish between CT scans that detect COVID-19 and those who do not. For more information on how it was created, check out the following link: https://github.com/DunnBC22/Vision_Audio_and_Multimodal_Projects/blob/main/Computer%20Vision/Image%20Classification/Binary%20Classification/COVID19%20Lung%20CT%20Scans/COVID19_Lung_CT_Scans_ViT.ipynb ## Intended uses & limitations This model is intended to demonstrate my ability to solve a complex problem using technology. ## Training and evaluation data Dataset Source: https://www.kaggle.com/datasets/luisblanche/covidct _Sample Images From Dataset:_ ![Sample Images](https://github.com/DunnBC22/Vision_Audio_and_Multimodal_Projects/raw/main/Computer%20Vision/Image%20Classification/Binary%20Classification/COVID19%20Lung%20CT%20Scans/Images/Sample%20Images.png) ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Recall | Precision | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:------:|:---------:| | 0.6742 | 1.0 | 38 | 0.4309 | 0.9 | 0.8993 | 0.8816 | 0.9178 | | 0.6742 | 2.0 | 76 | 0.3739 | 0.8467 | 0.8686 | 1.0 | 0.7677 | | 0.6742 | 3.0 | 114 | 0.1727 | 0.94 | 0.9379 | 0.8947 | 0.9855 | ### Framework versions - Transformers 4.22.2 - Pytorch 1.12.1 - Datasets 2.5.2 - Tokenizers 0.12.1
[ "ct_covid", "ct_noncovid" ]
lixiqi/swin-tiny-patch4-window7-224-finetuned-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-eurosat This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the image_folder dataset. It achieves the following results on the evaluation set: - Loss: 1.0384 - Accuracy: 0.6054 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.3974 | 1.0 | 202 | 1.3145 | 0.5120 | | 1.1948 | 2.0 | 404 | 1.0653 | 0.6040 | | 1.1676 | 3.0 | 606 | 1.0384 | 0.6054 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
[ "angry", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
lixiqi/vit-base-patch16-224-finetuned-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-patch16-224-finetuned-eurosat This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the image_folder dataset. It achieves the following results on the evaluation set: - Loss: 0.8561 - Accuracy: 0.6701 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.1531 | 1.0 | 202 | 0.9684 | 0.6329 | | 1.0658 | 2.0 | 404 | 0.8881 | 0.6667 | | 1.0049 | 3.0 | 606 | 0.8561 | 0.6701 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
[ "angry", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
lixiqi/beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013 This model is a fine-tuned version of [microsoft/beit-base-patch16-224-pt22k-ft22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k-ft22k) on the image_folder dataset. It achieves the following results on the evaluation set: - Loss: 0.8504 - Accuracy: 0.6879 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.1617 | 1.0 | 202 | 1.0081 | 0.6270 | | 1.0604 | 2.0 | 404 | 0.9516 | 0.6524 | | 0.998 | 3.0 | 606 | 0.8857 | 0.6809 | | 0.9971 | 4.0 | 808 | 0.8504 | 0.6879 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
[ "angry", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
Celal11/beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013 This model is a fine-tuned version of [Celal11/beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013](https://huggingface.co/Celal11/beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013) on the image_folder dataset. It achieves the following results on the evaluation set: - Loss: 0.6185 - Accuracy: 0.7743 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.9609 | 1.0 | 403 | 0.6678 | 0.7555 | | 0.7786 | 2.0 | 806 | 0.6185 | 0.7743 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
[ "angry", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
lixiqi/beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013-0.0001
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013-0.0001 This model is a fine-tuned version of [microsoft/beit-base-patch16-224-pt22k-ft22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k-ft22k) on the image_folder dataset. It achieves the following results on the evaluation set: - Loss: 0.8601 - Accuracy: 0.6855 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.1632 | 1.0 | 202 | 0.9975 | 0.6290 | | 1.0563 | 2.0 | 404 | 0.9350 | 0.6614 | | 0.9564 | 3.0 | 606 | 0.8601 | 0.6855 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
[ "angry", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
DunnBC22/vit-base-patch16-224-in21k_brain_tumor_diagnosis
# vit-base-patch16-224-in21k_brain_tumor_diagnosis This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.2591 - Accuracy: 0.9216 - F1: 0.9375 - Recall: 1.0 - Precision: 0.8824 ## Model description This is a binary classification model to distinguish between if the MRI images detect a brain tumor or not. For more information on how it was created, check out the following link: https://github.com/DunnBC22/Vision_Audio_and_Multimodal_Projects/blob/main/Computer%20Vision/Image%20Classification/Binary%20Classification/Brain%20Tumor%20MRI%20Images/brain_tumor_MRI_Images_ViT.ipynb ## Intended uses & limitations This model is intended to demonstrate my ability to solve a complex problem using technology. ## Training and evaluation data Dataset Source: https://www.kaggle.com/datasets/navoneel/brain-mri-images-for-brain-tumor-detection _Sample Images From Dataset:_ ![Sample Images](https://github.com/DunnBC22/Vision_Audio_and_Multimodal_Projects/raw/main/Computer%20Vision/Image%20Classification/Binary%20Classification/Brain%20Tumor%20MRI%20Images/Images/Sample%20Images.png) ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Recall | Precision | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:------:|:---------:| | 0.7101 | 1.0 | 13 | 0.3351 | 0.9412 | 0.9474 | 0.9 | 1.0 | | 0.7101 | 2.0 | 26 | 0.3078 | 0.9020 | 0.9231 | 1.0 | 0.8571 | | 0.7101 | 3.0 | 39 | 0.2591 | 0.9216 | 0.9375 | 1.0 | 0.8824 | | 0.7101 | 4.0 | 52 | 0.2702 | 0.9020 | 0.9123 | 0.8667 | 0.9630 
| | 0.7101 | 5.0 | 65 | 0.2855 | 0.9020 | 0.9123 | 0.8667 | 0.9630 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1 - Datasets 2.8.0 - Tokenizers 0.12.1
[ "no", "yes" ]
lixiqi/beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013-9e-05
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013-9e-05 This model is a fine-tuned version of [microsoft/beit-base-patch16-224-pt22k-ft22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k-ft22k) on the image_folder dataset. It achieves the following results on the evaluation set: - Loss: 0.8481 - Accuracy: 0.6840 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 9e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.1839 | 1.0 | 224 | 1.0266 | 0.6120 | | 1.0333 | 2.0 | 448 | 0.9063 | 0.6608 | | 0.9655 | 3.0 | 672 | 0.8481 | 0.6840 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
[ "angry", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
DunnBC22/vit-base-patch16-224-in21k_male_or_female_eyes
# vit-base-patch16-224-in21k_male_or_female_eyes This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.0810 - Accuracy: 0.9727 - F1: 0.9741 - Recall: 0.9666 - Precision: 0.9818 ## Model description This is a binary classification model to distinguish between male and female eyes. For more information on how it was created, check out the following link: https://github.com/DunnBC22/Vision_Audio_and_Multimodal_Projects/blob/main/Computer%20Vision/Image%20Classification/Binary%20Classification/Male%20or%20Female%20Eyes/are_they_male_or_female_eyes_ViT.ipynb ## Intended uses & limitations This model is intended to demonstrate my ability to solve a complex problem using technology. ## Training and evaluation data Dataset Source: https://www.kaggle.com/datasets/pavelbiz/eyes-rtte _Sample Images From Dataset:_ ![Sample Images](https://github.com/DunnBC22/Vision_Audio_and_Multimodal_Projects/raw/main/Computer%20Vision/Image%20Classification/Binary%20Classification/Male%20or%20Female%20Eyes/Images/Sample%20Images.png) ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Recall | Precision | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:------:|:---------:| | 0.1998 | 1.0 | 577 | 0.2365 | 0.9072 | 0.9196 | 0.9976 | 0.8530 | | 0.0846 | 2.0 | 1154 | 0.0810 | 0.9727 | 0.9741 | 0.9666 | 0.9818 | | 0.0309 | 3.0 | 1731 | 0.0852 | 0.9809 | 0.9821 | 0.9837 | 0.9805 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1 - Datasets 2.8.0 - Tokenizers 0.12.1
[ "female_eyes", "male_eyes" ]
NTQAI/pedestrian_age_recognition
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pedestrian_age_recognition_local This model is a fine-tuned version of [microsoft/beit-base-patch16-224-pt22k-ft22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k-ft22k) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.5004 - Accuracy: 0.8073 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 1337 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 0.8849 | 1.0 | 2008 | 0.7939 | 0.6807 | | 0.9836 | 2.0 | 4016 | 0.6694 | 0.7336 | | 0.8128 | 3.0 | 6024 | 0.5768 | 0.7668 | | 0.7611 | 4.0 | 8032 | 0.5541 | 0.7833 | | 0.6441 | 5.0 | 10040 | 0.5473 | 0.7773 | | 0.5696 | 6.0 | 12048 | 0.5187 | 0.7971 | | 0.6925 | 7.0 | 14056 | 0.5082 | 0.8038 | | 0.5711 | 8.0 | 16064 | 0.5092 | 0.8098 | | 0.7741 | 9.0 | 18072 | 0.5026 | 0.8020 | | 0.5269 | 10.0 | 20080 | 0.5004 | 0.8073 | ### Framework versions - Transformers 4.24.0.dev0 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1 ### Contact information For personal communication related to this project, please contact Nha Nguyen Van ([email protected]).
[ "age16-30", "age31-45", "age46-60", "ageabove60", "ageless15" ]
lixiqi/beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013-7e-05
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013-7e-05 This model is a fine-tuned version of [microsoft/beit-base-patch16-224-pt22k-ft22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k-ft22k) on the image_folder dataset. It achieves the following results on the evaluation set: - Loss: 0.7881 - Accuracy: 0.7221 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 7e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.2307 | 1.0 | 224 | 1.0863 | 0.5874 | | 1.0893 | 2.0 | 448 | 0.9700 | 0.6362 | | 1.0244 | 3.0 | 672 | 0.8859 | 0.6757 | | 1.016 | 4.0 | 896 | 0.8804 | 0.6787 | | 0.9089 | 5.0 | 1120 | 0.8611 | 0.6897 | | 0.8935 | 6.0 | 1344 | 0.8283 | 0.7028 | | 0.8403 | 7.0 | 1568 | 0.8116 | 0.7102 | | 0.8179 | 8.0 | 1792 | 0.7934 | 0.7166 | | 0.7764 | 9.0 | 2016 | 0.7865 | 0.7208 | | 0.771 | 10.0 | 2240 | 0.7881 | 0.7221 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
[ "angry", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
lixiqi/beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013-5e-05
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013-5e-05 This model is a fine-tuned version of [microsoft/beit-base-patch16-224-pt22k-ft22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k-ft22k) on the image_folder dataset. It achieves the following results on the evaluation set: - Loss: 0.8610 - Accuracy: 0.6833 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.1691 | 1.0 | 224 | 0.9764 | 0.6310 | | 1.0304 | 2.0 | 448 | 0.8965 | 0.6666 | | 0.9844 | 3.0 | 672 | 0.8610 | 0.6833 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
[ "angry", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
lixiqi/beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013-8e-05
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013-8e-05 This model is a fine-tuned version of [microsoft/beit-base-patch16-224-pt22k-ft22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k-ft22k) on the image_folder dataset. It achieves the following results on the evaluation set: - Loss: 0.8513 - Accuracy: 0.6851 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 8e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.1538 | 1.0 | 224 | 1.0147 | 0.6173 | | 1.03 | 2.0 | 448 | 0.9185 | 0.6588 | | 0.9692 | 3.0 | 672 | 0.8513 | 0.6851 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
[ "angry", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
lixiqi/beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013-6e-05
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013-6e-05 This model is a fine-tuned version of [microsoft/beit-base-patch16-224-pt22k-ft22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k-ft22k) on the image_folder dataset. It achieves the following results on the evaluation set: - Loss: 0.8551 - Accuracy: 0.6863 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 6e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.1611 | 1.0 | 224 | 0.9904 | 0.6278 | | 1.0324 | 2.0 | 448 | 0.9066 | 0.6666 | | 0.9725 | 3.0 | 672 | 0.8551 | 0.6863 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
[ "angry", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
team-marmalade/autotrain-lots_of_text-2797882537
# Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 2797882537 - CO2 Emissions (in grams): 2.7585 ## Validation Metrics - Loss: 0.076 - Accuracy: 0.970 - Precision: 0.980 - Recall: 0.988 - AUC: 0.991 - F1: 0.984
[ "lots_of_text", "quality" ]
Celal11/beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013CKPlus
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013CKPlus This model is a fine-tuned version of [Celal11/beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013](https://huggingface.co/Celal11/beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013) on the image_folder dataset. It achieves the following results on the evaluation set: - Loss: 0.0089 - Accuracy: 1.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.9918 | 0.97 | 27 | 0.2528 | 0.8985 | | 0.3355 | 1.97 | 54 | 0.0703 | 0.9797 | | 0.2484 | 2.97 | 81 | 0.0232 | 0.9848 | | 0.1971 | 3.97 | 108 | 0.0197 | 0.9848 | | 0.1731 | 4.97 | 135 | 0.0089 | 1.0 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
[ "angry", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
Celal11/beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013CKPlus-7e-05
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013CKPlus-7e-05 This model is a fine-tuned version of [lixiqi/beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013-7e-05](https://huggingface.co/lixiqi/beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013-7e-05) on the image_folder dataset. It achieves the following results on the evaluation set: - Loss: 0.0127 - Accuracy: 1.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 7e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.9364 | 0.97 | 27 | 0.1873 | 0.9645 | | 0.3365 | 1.97 | 54 | 0.0951 | 0.9848 | | 0.2482 | 2.97 | 81 | 0.0562 | 0.9949 | | 0.1844 | 3.97 | 108 | 0.0213 | 0.9949 | | 0.1693 | 4.97 | 135 | 0.0127 | 1.0 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
[ "angry", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
BIDEQUITY/autotrain-software_picture_preselection_classifier-2804582686
# Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 2804582686 - CO2 Emissions (in grams): 2.0734 ## Validation Metrics - Loss: 0.209 - Accuracy: 0.973 - Macro F1: 0.980 - Micro F1: 0.973 - Weighted F1: 0.973 - Macro Precision: 0.980 - Micro Precision: 0.973 - Weighted Precision: 0.973 - Macro Recall: 0.980 - Micro Recall: 0.973 - Weighted Recall: 0.973
[ "group", "portrait", "stock" ]
lixiqi/beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013-7e-05-finetuned-SFEW-7e-05
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013-7e-05-finetuned-FER2013-7e-05 This model is a fine-tuned version of [lixiqi/beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013-7e-05](https://huggingface.co/lixiqi/beit-base-patch16-224-pt22k-ft22k-finetuned-FER2013-7e-05) on the image_folder dataset. It achieves the following results on the evaluation set: - Loss: 1.5659 - Accuracy: 0.5260 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 7e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.6537 | 0.97 | 14 | 1.4980 | 0.4683 | | 1.4325 | 1.97 | 28 | 1.4777 | 0.5040 | | 1.1532 | 2.97 | 42 | 1.5007 | 0.4960 | | 1.0428 | 3.97 | 56 | 1.5480 | 0.4890 | | 0.8716 | 4.97 | 70 | 1.5659 | 0.5260 | | 0.892 | 5.97 | 84 | 1.6132 | 0.4960 | | 0.8109 | 6.97 | 98 | 1.5895 | 0.5167 | | 0.7413 | 7.97 | 112 | 1.6271 | 0.5202 | | 0.765 | 8.97 | 126 | 1.5991 | 0.5040 | | 0.6575 | 9.97 | 140 | 1.6041 | 0.4960 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
[ "angry", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
kmewhort/beit-sketch-classifier-pt-metaset
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # beit-sketch-classifier-pt-metaset This model is a fine-tuned version of [microsoft/beit-base-patch16-224-pt22k-ft22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k-ft22k) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.6732 - Accuracy: 0.8277 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:------:|:---------------:|:--------:| | 0.8069 | 1.0 | 76608 | 0.7673 | 0.7988 | | 0.6922 | 2.0 | 153216 | 0.6982 | 0.8159 | | 0.6289 | 3.0 | 229824 | 0.6709 | 0.8236 | | 0.5332 | 4.0 | 306432 | 0.6635 | 0.8271 | | 0.4283 | 5.0 | 383040 | 0.6732 | 0.8277 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.1+cu117 - Datasets 2.7.1 - Tokenizers 0.13.2
[ "0", "1", "10", "100", "101", "102", "103", "104", "105", "106", "107", "108", "109", "11", "110", "111", "112", "113", "114", "115", "116", "117", "118", "119", "12", "120", "121", "122", "123", "124", "125", "126", "127", "128", "129", "13", "130", "131", "132", "133", "134", "135", "136", "137", "138", "139", "14", "140", "141", "142", "143", "144", "145", "146", "147", "148", "149", "15", "150", "151", "152", "153", "154", "155", "156", "157", "158", "159", "16", "160", "161", "162", "163", "164", "165", "166", "167", "168", "169", "17", "170", "171", "172", "173", "174", "175", "176", "177", "178", "179", "18", "180", "181", "182", "183", "184", "185", "186", "187", "188", "189", "19", "190", "191", "192", "193", "194", "195", "196", "197", "198", "199", "2", "20", "200", "201", "202", "203", "204", "205", "206", "207", "208", "209", "21", "210", "211", "212", "213", "214", "215", "216", "217", "218", "219", "22", "220", "221", "222", "223", "224", "225", "226", "227", "228", "229", "23", "230", "231", "232", "233", "234", "235", "236", "237", "238", "239", "24", "240", "241", "242", "243", "244", "245", "246", "247", "248", "249", "25", "250", "251", "252", "253", "254", "255", "256", "257", "258", "259", "26", "260", "261", "262", "263", "264", "265", "266", "267", "268", "269", "27", "270", "271", "272", "273", "274", "275", "276", "277", "278", "279", "28", "280", "281", "282", "283", "284", "285", "286", "287", "288", "289", "29", "290", "291", "292", "293", "294", "295", "296", "297", "298", "299", "3", "30", "300", "301", "302", "303", "304", "305", "306", "307", "308", "309", "31", "310", "311", "312", "313", "314", "315", "316", "317", "318", "319", "32", "320", "321", "322", "323", "324", "325", "326", "327", "328", "329", "33", "330", "331", "332", "333", "334", "335", "336", "337", "338", "339", "34", "340", "341", "342", "343", "344", "35", "36", "37", "38", "39", "4", "40", "41", "42", "43", "44", "45", "46", "47", "48", "49", "5", "50", "51", 
"52", "53", "54", "55", "56", "57", "58", "59", "6", "60", "61", "62", "63", "64", "65", "66", "67", "68", "69", "7", "70", "71", "72", "73", "74", "75", "76", "77", "78", "79", "8", "80", "81", "82", "83", "84", "85", "86", "87", "88", "89", "9", "90", "91", "92", "93", "94", "95", "96", "97", "98", "99" ]
ongp/swin-tiny-patch4-window7-224-finetuned-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-eurosat This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
[ "ass%20focus", "bbw", "chubby", "ssbbw", "thin", "ussbbw" ]
DunnBC22/vit-base-patch16-224-in21k_dog_vs_cat_image_classification
# vit-base-patch16-224-in21k_dog_vs_cat_image_classification This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k). It achieves the following results on the evaluation set: - Loss: 0.0404 - Accuracy: 0.99 - F1: 0.9897 - Recall: 0.9909 - Precision: 0.9885 ## Model description This is a binary classification model to distinguish between cats and dogs. For more information on how it was created, check out the following link: https://github.com/DunnBC22/Vision_Audio_and_Multimodal_Projects/blob/main/Computer%20Vision/Image%20Classification/Binary%20Classification/Dogs%20or%20Cats%20Image%20Classification/Dog_v_Cat_ViT.ipynb ## Intended uses & limitations This model is intended to demonstrate my ability to solve a complex problem using technology. ## Training and evaluation data Dataset Source: https://www.kaggle.com/datasets/shaunthesheep/microsoft-catsvsdogs-dataset _Sample Images From Dataset:_ ![Sample Images](https://github.com/DunnBC22/Vision_Audio_and_Multimodal_Projects/raw/main/Computer%20Vision/Image%20Classification/Binary%20Classification/Dogs%20or%20Cats%20Image%20Classification/Images/Sample%20Images.png) ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Recall | Precision | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:------:|:---------:| | 0.0896 | 1.0 | 1250 | 0.0590 | 0.979 | 0.9783 | 0.9728 | 0.9838 | | 0.0253 | 2.0 | 2500 | 0.0543 | 0.9842 | 0.9837 | 0.9802 | 0.9871 | | 0.0066 | 3.0 | 3750 | 0.0404 | 0.99 | 0.9897 | 0.9909 | 0.9885 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1 - Datasets 2.8.0 - Tokenizers 
0.12.1
[ "cat", "dog" ]
DunnBC22/vit-base-patch16-224-in21k_vegetables_clf
# vit-base-patch16-224-in21k_vegetables_clf This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k). It achieves the following results on the evaluation set: - Loss: 0.0014 - Accuracy: 1.0 - F1 - Weighted: 1.0 - Micro: 1.0 - Macro: 1.0 - Recall - Weighted: 1.0 - Micro: 1.0 - Macro: 1.0 - Precision - Weighted: 1.0 - Micro: 1.0 - Macro: 1.0 ## Model description This is a multiclass image classification model of different vegetables. For more information on how it was created, check out the following link: https://github.com/DunnBC22/Vision_Audio_and_Multimodal_Projects/blob/main/Computer%20Vision/Image%20Classification/Multiclass%20Classification/Vegetable%20Image%20Classification/Vegetables_ViT.ipynb ## Intended uses & limitations This model is intended to demonstrate my ability to solve a complex problem using technology. ## Training and evaluation data Dataset Source: https://www.kaggle.com/datasets/misrakahmed/vegetable-image-dataset _Sample Images From Dataset:_ ![Sample Images](https://github.com/DunnBC22/Vision_Audio_and_Multimodal_Projects/raw/main/Computer%20Vision/Image%20Classification/Multiclass%20Classification/Vegetable%20Image%20Classification/Images/Sample%20Images.png) ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Weighted F1 | Micro F1 | Macro F1 | Weighted Recall | Micro Recall | Macro Recall | Weighted Precision | Micro Precision | Macro Precision | |:-------------:|:-----:|:----:|:---------------:|:--------:|:-----------:|:--------:|:--------:|:---------------:|:------------:|:------------:|:------------------:|:---------------:|:---------------:| | 
0.2079 | 1.0 | 938 | 0.0193 | 0.996 | 0.9960 | 0.996 | 0.9960 | 0.996 | 0.996 | 0.9960 | 0.9960 | 0.996 | 0.9960 | | 0.0154 | 2.0 | 1876 | 0.0068 | 0.9987 | 0.9987 | 0.9987 | 0.9987 | 0.9987 | 0.9987 | 0.9987 | 0.9987 | 0.9987 | 0.9987 | | 0.0018 | 3.0 | 2814 | 0.0014 | 1.0 | 1.0 | 1.0 | 1.0 | 1.0 | 1.0 | 1.0 | 1.0 | 1.0 | 1.0 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1 - Datasets 2.8.0 - Tokenizers 0.12.1
[ "bean", "bitter_gourd", "papaya", "potato", "pumpkin", "radish", "tomato", "bottle_gourd", "brinjal", "broccoli", "cabbage", "capsicum", "carrot", "cauliflower", "cucumber" ]
jayanta/google-vit-base-patch16-224-face
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # google-vit-base-patch16-224-face This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 1.4531 - Accuracy: 0.7249 - Precision: 0.7172 - Recall: 0.7249 - F1: 0.7196 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.00012 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:| | 0.8514 | 1.0 | 290 | 0.8464 | 0.7048 | 0.7035 | 0.7048 | 0.6909 | | 0.7202 | 2.0 | 580 | 0.7791 | 0.7283 | 0.7297 | 0.7283 | 0.7111 | | 0.5455 | 3.0 | 870 | 0.7950 | 0.7285 | 0.7174 | 0.7285 | 0.7171 | | 0.334 | 4.0 | 1160 | 0.8948 | 0.7155 | 0.7152 | 0.7155 | 0.7145 | | 0.1644 | 5.0 | 1450 | 1.0820 | 0.7239 | 0.7189 | 0.7239 | 0.7194 | | 0.0482 | 6.0 | 1740 | 1.2792 | 0.7204 | 0.7144 | 0.7204 | 0.7160 | | 0.0236 | 7.0 | 2030 | 1.4162 | 0.7279 | 0.7195 | 0.7279 | 0.7209 | | 0.0049 | 8.0 | 2320 | 1.4531 | 0.7249 | 0.7172 | 0.7249 | 0.7196 | ### Framework versions - Transformers 4.24.0.dev0 - Pytorch 1.11.0+cu102 - Datasets 2.6.1 - Tokenizers 0.13.1
[ "angry", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
furusu/umamusume-classifier
finetuned from https://huggingface.co/google/vit-base-patch16-224-in21k dataset:26k images (train:21k valid:5k) accuracy of validation dataset is 95% ```Python from transformers import ViTFeatureExtractor, ViTForImageClassification from PIL import Image path = 'image_path' image = Image.open(path) feature_extractor = ViTFeatureExtractor.from_pretrained('furusu/umamusume-classifier') model = ViTForImageClassification.from_pretrained('furusu/umamusume-classifier') inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) predicted_class_idx = outputs.logits.argmax(-1).item() print("Predicted class:", model.config.id2label[predicted_class_idx]) ```
[ "haru urara", "rice shower", "copano rickey", "seeking the pearl", "berno light", "buena vista", "queen beret", "beauty again", "sun visor", "satono crown", "little trattoria", "hishi amazon", "agnes tachyon", "seiun sky", "silence suzuka", "zenno rob roy", "agnes digital", "king halo", "mayano top gun", "orfevre", "grass wonder", "gold ship", "twin turbo", "eishin flash", "mejiro dober", "tamamo cross", "meisho doto", "admire vega", "curren chan", "nishino flower", "yukino bijin", "hishi akebono", "special week", "air groove", "super creek", "mr. c.b.", "air shakur", "matikanefukukitaru", "tokai teio", "sweep tosho", "maruzensky", "aston machan", "daiwa scarlet", "mejiro ardan", "smart falcon", "matikane tannhauser", "el condor pasa", "k.s.miracle", "sirius symboli", "kitasan black", "gold city", "mihono bourbon", "wonder acute", "nice nature", "fuji kiseki", "vodka", "nakayama festa", "narita brian", "oguri cap", "narita top road", "tosen jordan", "taiki shuttle", "fine motion", "biwa hayahide", "symboli rudolf", "yamanin zephyr", "narita taishin", "marvelous sunday", "mejiro palmer", "daring tact", "deep impact", "mejiro bright", "sakura chiyono o", "bitter glasse", "mejiro ryan", "satono diamond", "akikawa yayoi", "t.m. opera o", "mejiro ramonu", "daiichi ruby", "ines fujin", "yaeno muteki", "shinko windy", "tanino gimlet", "ikuno dictus", "symboli kris s", "manhattan cafe", "daitaku helios", "bamboo memory", "little cocon", "sakura bakushin o", "kin'iro ryotei", "cheval grand", "hokko tarumae", "kawakami princess", "winning ticket", "light hello", "mejiro mcqueen", "happy meek", "sakura laurel", "inari one", "trainer", "biko pegasus", "montjeu", "broye", "tsurumaru tsuyoshi", "toni bianca", "obey your master" ]
platzi/platzi-vit-model-elyager
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # platzi-vit-model-elyager This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.0677 - Accuracy: 0.9774 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.0067 | 3.85 | 500 | 0.0677 | 0.9774 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
[ "angular_leaf_spot", "bean_rust", "healthy" ]
keithanpai/vit-base-patch32-224-in21k-finetuned-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-patch32-224-in21k-finetuned-eurosat This model is a fine-tuned version of [google/vit-base-patch32-224-in21k](https://huggingface.co/google/vit-base-patch32-224-in21k) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.8115 - Accuracy: 0.9945 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.8903 | 1.0 | 102 | 1.5728 | 0.9517 | | 1.2226 | 2.0 | 204 | 0.9374 | 0.9917 | | 1.1069 | 3.0 | 306 | 0.8115 | 0.9945 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
[ "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "del", "nothing", "space" ]
owsgfwnlgjuz/autotrain-test_auto_nlp-2885884378
# Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 2885884378 - CO2 Emissions (in grams): 1.1398 ## Validation Metrics - Loss: 0.079 - Accuracy: 1.000 - Precision: 1.000 - Recall: 1.000 - AUC: 1.000 - F1: 1.000
[ "lamborghini", "vaz" ]
susnato/my_food_classifier
<!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # susnato/my_food_classifier This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.0074 - Validation Loss: 0.2560 - Train Accuracy: 0.945 - Epoch: 4 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 3e-05, 'decay_steps': 20000, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Train Accuracy | Epoch | |:----------:|:---------------:|:--------------:|:-----:| | 0.0180 | 0.2310 | 0.946 | 0 | | 0.0126 | 0.2385 | 0.946 | 1 | | 0.0104 | 0.2445 | 0.944 | 2 | | 0.0088 | 0.2505 | 0.944 | 3 | | 0.0074 | 0.2560 | 0.945 | 4 | ### Framework versions - Transformers 4.25.1 - TensorFlow 2.9.2 - Datasets 2.8.0 - Tokenizers 0.13.2
[ "apple_pie", "baby_back_ribs", "bruschetta", "waffles", "caesar_salad", "cannoli", "caprese_salad", "carrot_cake", "ceviche", "cheesecake", "cheese_plate", "chicken_curry", "chicken_quesadilla", "baklava", "chicken_wings", "chocolate_cake", "chocolate_mousse", "churros", "clam_chowder", "club_sandwich", "crab_cakes", "creme_brulee", "croque_madame", "cup_cakes", "beef_carpaccio", "deviled_eggs", "donuts", "dumplings", "edamame", "eggs_benedict", "escargots", "falafel", "filet_mignon", "fish_and_chips", "foie_gras", "beef_tartare", "french_fries", "french_onion_soup", "french_toast", "fried_calamari", "fried_rice", "frozen_yogurt", "garlic_bread", "gnocchi", "greek_salad", "grilled_cheese_sandwich", "beet_salad", "grilled_salmon", "guacamole", "gyoza", "hamburger", "hot_and_sour_soup", "hot_dog", "huevos_rancheros", "hummus", "ice_cream", "lasagna", "beignets", "lobster_bisque", "lobster_roll_sandwich", "macaroni_and_cheese", "macarons", "miso_soup", "mussels", "nachos", "omelette", "onion_rings", "oysters", "bibimbap", "pad_thai", "paella", "pancakes", "panna_cotta", "peking_duck", "pho", "pizza", "pork_chop", "poutine", "prime_rib", "bread_pudding", "pulled_pork_sandwich", "ramen", "ravioli", "red_velvet_cake", "risotto", "samosa", "sashimi", "scallops", "seaweed_salad", "shrimp_and_grits", "breakfast_burrito", "spaghetti_bolognese", "spaghetti_carbonara", "spring_rolls", "steak", "strawberry_shortcake", "sushi", "tacos", "takoyaki", "tiramisu", "tuna_tartare" ]
naveensb8182/vit-base-beans
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-beans This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Accuracy: 0.9774 - Loss: 0.0876 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 1337 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results | Training Loss | Epoch | Step | Accuracy | Validation Loss | |:-------------:|:-----:|:----:|:--------:|:---------------:| | 0.26 | 1.0 | 130 | 0.9549 | 0.2285 | | 0.277 | 2.0 | 260 | 0.9925 | 0.1066 | | 0.1629 | 3.0 | 390 | 0.9699 | 0.1069 | | 0.0963 | 4.0 | 520 | 0.9774 | 0.0885 | | 0.1569 | 5.0 | 650 | 0.9774 | 0.0876 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.1+cpu - Datasets 2.8.0 - Tokenizers 0.13.2
[ "angular_leaf_spot", "bean_rust", "healthy" ]
asd0936/my_awesome_food_model
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my_awesome_food_model This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the food101 dataset. It achieves the following results on the evaluation set: - Loss: 1.5916 - Accuracy: 0.897 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 2.6742 | 0.99 | 62 | 2.5104 | 0.821 | | 1.8036 | 1.99 | 124 | 1.7824 | 0.863 | | 1.591 | 2.99 | 186 | 1.5916 | 0.897 | ### Framework versions - Transformers 4.26.0 - Pytorch 1.13.1+cu116 - Datasets 2.9.0 - Tokenizers 0.13.2
[ "apple_pie", "baby_back_ribs", "bruschetta", "waffles", "caesar_salad", "cannoli", "caprese_salad", "carrot_cake", "ceviche", "cheesecake", "cheese_plate", "chicken_curry", "chicken_quesadilla", "baklava", "chicken_wings", "chocolate_cake", "chocolate_mousse", "churros", "clam_chowder", "club_sandwich", "crab_cakes", "creme_brulee", "croque_madame", "cup_cakes", "beef_carpaccio", "deviled_eggs", "donuts", "dumplings", "edamame", "eggs_benedict", "escargots", "falafel", "filet_mignon", "fish_and_chips", "foie_gras", "beef_tartare", "french_fries", "french_onion_soup", "french_toast", "fried_calamari", "fried_rice", "frozen_yogurt", "garlic_bread", "gnocchi", "greek_salad", "grilled_cheese_sandwich", "beet_salad", "grilled_salmon", "guacamole", "gyoza", "hamburger", "hot_and_sour_soup", "hot_dog", "huevos_rancheros", "hummus", "ice_cream", "lasagna", "beignets", "lobster_bisque", "lobster_roll_sandwich", "macaroni_and_cheese", "macarons", "miso_soup", "mussels", "nachos", "omelette", "onion_rings", "oysters", "bibimbap", "pad_thai", "paella", "pancakes", "panna_cotta", "peking_duck", "pho", "pizza", "pork_chop", "poutine", "prime_rib", "bread_pudding", "pulled_pork_sandwich", "ramen", "ravioli", "red_velvet_cake", "risotto", "samosa", "sashimi", "scallops", "seaweed_salad", "shrimp_and_grits", "breakfast_burrito", "spaghetti_bolognese", "spaghetti_carbonara", "spring_rolls", "steak", "strawberry_shortcake", "sushi", "tacos", "takoyaki", "tiramisu", "tuna_tartare" ]
fxmarty/tiny-testing-remote-code
Use at your own risk: ```python from transformers import AutoFeatureExtractor, AutoModelForImageClassification from datasets import load_dataset import torch feature_extractor = AutoFeatureExtractor.from_pretrained("fxmarty/tiny-testing-remote-code") model = AutoModelForImageClassification.from_pretrained("fxmarty/tiny-testing-remote-code", trust_remote_code=True) dataset = load_dataset("huggingface/cats-image") image = dataset["test"]["image"][0] inputs = feature_extractor(image, return_tensors="pt") with torch.no_grad(): logits = model(**inputs).logits # model predicts one of the 1000 ImageNet classes predicted_label = logits.argmax(-1).item() print(model.config.id2label[predicted_label]) ```
[ "tench, tinca tinca", "goldfish, carassius auratus", "great white shark, white shark, man-eater, man-eating shark, carcharodon carcharias", "tiger shark, galeocerdo cuvieri", "hammerhead, hammerhead shark", "electric ray, crampfish, numbfish, torpedo", "stingray", "cock", "hen", "ostrich, struthio camelus", "brambling, fringilla montifringilla", "goldfinch, carduelis carduelis", "house finch, linnet, carpodacus mexicanus", "junco, snowbird", "indigo bunting, indigo finch, indigo bird, passerina cyanea", "robin, american robin, turdus migratorius", "bulbul", "jay", "magpie", "chickadee", "water ouzel, dipper", "kite", "bald eagle, american eagle, haliaeetus leucocephalus", "vulture", "great grey owl, great gray owl, strix nebulosa", "european fire salamander, salamandra salamandra", "common newt, triturus vulgaris", "eft", "spotted salamander, ambystoma maculatum", "axolotl, mud puppy, ambystoma mexicanum", "bullfrog, rana catesbeiana", "tree frog, tree-frog", "tailed frog, bell toad, ribbed toad, tailed toad, ascaphus trui", "loggerhead, loggerhead turtle, caretta caretta", "leatherback turtle, leatherback, leathery turtle, dermochelys coriacea", "mud turtle", "terrapin", "box turtle, box tortoise", "banded gecko", "common iguana, iguana, iguana iguana", "american chameleon, anole, anolis carolinensis", "whiptail, whiptail lizard", "agama", "frilled lizard, chlamydosaurus kingi", "alligator lizard", "gila monster, heloderma suspectum", "green lizard, lacerta viridis", "african chameleon, chamaeleo chamaeleon", "komodo dragon, komodo lizard, dragon lizard, giant lizard, varanus komodoensis", "african crocodile, nile crocodile, crocodylus niloticus", "american alligator, alligator mississipiensis", "triceratops", "thunder snake, worm snake, carphophis amoenus", "ringneck snake, ring-necked snake, ring snake", "hognose snake, puff adder, sand viper", "green snake, grass snake", "king snake, kingsnake", "garter snake, grass snake", "water snake", "vine snake", "night 
snake, hypsiglena torquata", "boa constrictor, constrictor constrictor", "rock python, rock snake, python sebae", "indian cobra, naja naja", "green mamba", "sea snake", "horned viper, cerastes, sand viper, horned asp, cerastes cornutus", "diamondback, diamondback rattlesnake, crotalus adamanteus", "sidewinder, horned rattlesnake, crotalus cerastes", "trilobite", "harvestman, daddy longlegs, phalangium opilio", "scorpion", "black and gold garden spider, argiope aurantia", "barn spider, araneus cavaticus", "garden spider, aranea diademata", "black widow, latrodectus mactans", "tarantula", "wolf spider, hunting spider", "tick", "centipede", "black grouse", "ptarmigan", "ruffed grouse, partridge, bonasa umbellus", "prairie chicken, prairie grouse, prairie fowl", "peacock", "quail", "partridge", "african grey, african gray, psittacus erithacus", "macaw", "sulphur-crested cockatoo, kakatoe galerita, cacatua galerita", "lorikeet", "coucal", "bee eater", "hornbill", "hummingbird", "jacamar", "toucan", "drake", "red-breasted merganser, mergus serrator", "goose", "black swan, cygnus atratus", "tusker", "echidna, spiny anteater, anteater", "platypus, duckbill, duckbilled platypus, duck-billed platypus, ornithorhynchus anatinus", "wallaby, brush kangaroo", "koala, koala bear, kangaroo bear, native bear, phascolarctos cinereus", "wombat", "jellyfish", "sea anemone, anemone", "brain coral", "flatworm, platyhelminth", "nematode, nematode worm, roundworm", "conch", "snail", "slug", "sea slug, nudibranch", "chiton, coat-of-mail shell, sea cradle, polyplacophore", "chambered nautilus, pearly nautilus, nautilus", "dungeness crab, cancer magister", "rock crab, cancer irroratus", "fiddler crab", "king crab, alaska crab, alaskan king crab, alaska king crab, paralithodes camtschatica", "american lobster, northern lobster, maine lobster, homarus americanus", "spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish", "crayfish, crawfish, crawdad, crawdaddy", "hermit crab", 
"isopod", "white stork, ciconia ciconia", "black stork, ciconia nigra", "spoonbill", "flamingo", "little blue heron, egretta caerulea", "american egret, great white heron, egretta albus", "bittern", "crane", "limpkin, aramus pictus", "european gallinule, porphyrio porphyrio", "american coot, marsh hen, mud hen, water hen, fulica americana", "bustard", "ruddy turnstone, arenaria interpres", "red-backed sandpiper, dunlin, erolia alpina", "redshank, tringa totanus", "dowitcher", "oystercatcher, oyster catcher", "pelican", "king penguin, aptenodytes patagonica", "albatross, mollymawk", "grey whale, gray whale, devilfish, eschrichtius gibbosus, eschrichtius robustus", "killer whale, killer, orca, grampus, sea wolf, orcinus orca", "dugong, dugong dugon", "sea lion", "chihuahua", "japanese spaniel", "maltese dog, maltese terrier, maltese", "pekinese, pekingese, peke", "shih-tzu", "blenheim spaniel", "papillon", "toy terrier", "rhodesian ridgeback", "afghan hound, afghan", "basset, basset hound", "beagle", "bloodhound, sleuthhound", "bluetick", "black-and-tan coonhound", "walker hound, walker foxhound", "english foxhound", "redbone", "borzoi, russian wolfhound", "irish wolfhound", "italian greyhound", "whippet", "ibizan hound, ibizan podenco", "norwegian elkhound, elkhound", "otterhound, otter hound", "saluki, gazelle hound", "scottish deerhound, deerhound", "weimaraner", "staffordshire bullterrier, staffordshire bull terrier", "american staffordshire terrier, staffordshire terrier, american pit bull terrier, pit bull terrier", "bedlington terrier", "border terrier", "kerry blue terrier", "irish terrier", "norfolk terrier", "norwich terrier", "yorkshire terrier", "wire-haired fox terrier", "lakeland terrier", "sealyham terrier, sealyham", "airedale, airedale terrier", "cairn, cairn terrier", "australian terrier", "dandie dinmont, dandie dinmont terrier", "boston bull, boston terrier", "miniature schnauzer", "giant schnauzer", "standard schnauzer", "scotch terrier, scottish 
terrier, scottie", "tibetan terrier, chrysanthemum dog", "silky terrier, sydney silky", "soft-coated wheaten terrier", "west highland white terrier", "lhasa, lhasa apso", "flat-coated retriever", "curly-coated retriever", "golden retriever", "labrador retriever", "chesapeake bay retriever", "german short-haired pointer", "vizsla, hungarian pointer", "english setter", "irish setter, red setter", "gordon setter", "brittany spaniel", "clumber, clumber spaniel", "english springer, english springer spaniel", "welsh springer spaniel", "cocker spaniel, english cocker spaniel, cocker", "sussex spaniel", "irish water spaniel", "kuvasz", "schipperke", "groenendael", "malinois", "briard", "kelpie", "komondor", "old english sheepdog, bobtail", "shetland sheepdog, shetland sheep dog, shetland", "collie", "border collie", "bouvier des flandres, bouviers des flandres", "rottweiler", "german shepherd, german shepherd dog, german police dog, alsatian", "doberman, doberman pinscher", "miniature pinscher", "greater swiss mountain dog", "bernese mountain dog", "appenzeller", "entlebucher", "boxer", "bull mastiff", "tibetan mastiff", "french bulldog", "great dane", "saint bernard, st bernard", "eskimo dog, husky", "malamute, malemute, alaskan malamute", "siberian husky", "dalmatian, coach dog, carriage dog", "affenpinscher, monkey pinscher, monkey dog", "basenji", "pug, pug-dog", "leonberg", "newfoundland, newfoundland dog", "great pyrenees", "samoyed, samoyede", "pomeranian", "chow, chow chow", "keeshond", "brabancon griffon", "pembroke, pembroke welsh corgi", "cardigan, cardigan welsh corgi", "toy poodle", "miniature poodle", "standard poodle", "mexican hairless", "timber wolf, grey wolf, gray wolf, canis lupus", "white wolf, arctic wolf, canis lupus tundrarum", "red wolf, maned wolf, canis rufus, canis niger", "coyote, prairie wolf, brush wolf, canis latrans", "dingo, warrigal, warragal, canis dingo", "dhole, cuon alpinus", "african hunting dog, hyena dog, cape hunting dog, lycaon 
pictus", "hyena, hyaena", "red fox, vulpes vulpes", "kit fox, vulpes macrotis", "arctic fox, white fox, alopex lagopus", "grey fox, gray fox, urocyon cinereoargenteus", "tabby, tabby cat", "tiger cat", "persian cat", "siamese cat, siamese", "egyptian cat", "cougar, puma, catamount, mountain lion, painter, panther, felis concolor", "lynx, catamount", "leopard, panthera pardus", "snow leopard, ounce, panthera uncia", "jaguar, panther, panthera onca, felis onca", "lion, king of beasts, panthera leo", "tiger, panthera tigris", "cheetah, chetah, acinonyx jubatus", "brown bear, bruin, ursus arctos", "american black bear, black bear, ursus americanus, euarctos americanus", "ice bear, polar bear, ursus maritimus, thalarctos maritimus", "sloth bear, melursus ursinus, ursus ursinus", "mongoose", "meerkat, mierkat", "tiger beetle", "ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle", "ground beetle, carabid beetle", "long-horned beetle, longicorn, longicorn beetle", "leaf beetle, chrysomelid", "dung beetle", "rhinoceros beetle", "weevil", "fly", "bee", "ant, emmet, pismire", "grasshopper, hopper", "cricket", "walking stick, walkingstick, stick insect", "cockroach, roach", "mantis, mantid", "cicada, cicala", "leafhopper", "lacewing, lacewing fly", "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk", "damselfly", "admiral", "ringlet, ringlet butterfly", "monarch, monarch butterfly, milkweed butterfly, danaus plexippus", "cabbage butterfly", "sulphur butterfly, sulfur butterfly", "lycaenid, lycaenid butterfly", "starfish, sea star", "sea urchin", "sea cucumber, holothurian", "wood rabbit, cottontail, cottontail rabbit", "hare", "angora, angora rabbit", "hamster", "porcupine, hedgehog", "fox squirrel, eastern fox squirrel, sciurus niger", "marmot", "beaver", "guinea pig, cavia cobaya", "sorrel", "zebra", "hog, pig, grunter, squealer, sus scrofa", "wild boar, boar, sus scrofa", "warthog", 
"hippopotamus, hippo, river horse, hippopotamus amphibius", "ox", "water buffalo, water ox, asiatic buffalo, bubalus bubalis", "bison", "ram, tup", "bighorn, bighorn sheep, cimarron, rocky mountain bighorn, rocky mountain sheep, ovis canadensis", "ibex, capra ibex", "hartebeest", "impala, aepyceros melampus", "gazelle", "arabian camel, dromedary, camelus dromedarius", "llama", "weasel", "mink", "polecat, fitch, foulmart, foumart, mustela putorius", "black-footed ferret, ferret, mustela nigripes", "otter", "skunk, polecat, wood pussy", "badger", "armadillo", "three-toed sloth, ai, bradypus tridactylus", "orangutan, orang, orangutang, pongo pygmaeus", "gorilla, gorilla gorilla", "chimpanzee, chimp, pan troglodytes", "gibbon, hylobates lar", "siamang, hylobates syndactylus, symphalangus syndactylus", "guenon, guenon monkey", "patas, hussar monkey, erythrocebus patas", "baboon", "macaque", "langur", "colobus, colobus monkey", "proboscis monkey, nasalis larvatus", "marmoset", "capuchin, ringtail, cebus capucinus", "howler monkey, howler", "titi, titi monkey", "spider monkey, ateles geoffroyi", "squirrel monkey, saimiri sciureus", "madagascar cat, ring-tailed lemur, lemur catta", "indri, indris, indri indri, indri brevicaudatus", "indian elephant, elephas maximus", "african elephant, loxodonta africana", "lesser panda, red panda, panda, bear cat, cat bear, ailurus fulgens", "giant panda, panda, panda bear, coon bear, ailuropoda melanoleuca", "barracouta, snoek", "eel", "coho, cohoe, coho salmon, blue jack, silver salmon, oncorhynchus kisutch", "rock beauty, holocanthus tricolor", "anemone fish", "sturgeon", "gar, garfish, garpike, billfish, lepisosteus osseus", "lionfish", "puffer, pufferfish, blowfish, globefish", "abacus", "abaya", "academic gown, academic robe, judge's robe", "accordion, piano accordion, squeeze box", "acoustic guitar", "aircraft carrier, carrier, flattop, attack aircraft carrier", "airliner", "airship, dirigible", "altar", "ambulance", "amphibian, 
amphibious vehicle", "analog clock", "apiary, bee house", "apron", "ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin", "assault rifle, assault gun", "backpack, back pack, knapsack, packsack, rucksack, haversack", "bakery, bakeshop, bakehouse", "balance beam, beam", "balloon", "ballpoint, ballpoint pen, ballpen, biro", "band aid", "banjo", "bannister, banister, balustrade, balusters, handrail", "barbell", "barber chair", "barbershop", "barn", "barometer", "barrel, cask", "barrow, garden cart, lawn cart, wheelbarrow", "baseball", "basketball", "bassinet", "bassoon", "bathing cap, swimming cap", "bath towel", "bathtub, bathing tub, bath, tub", "beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon", "beacon, lighthouse, beacon light, pharos", "beaker", "bearskin, busby, shako", "beer bottle", "beer glass", "bell cote, bell cot", "bib", "bicycle-built-for-two, tandem bicycle, tandem", "bikini, two-piece", "binder, ring-binder", "binoculars, field glasses, opera glasses", "birdhouse", "boathouse", "bobsled, bobsleigh, bob", "bolo tie, bolo, bola tie, bola", "bonnet, poke bonnet", "bookcase", "bookshop, bookstore, bookstall", "bottlecap", "bow", "bow tie, bow-tie, bowtie", "brass, memorial tablet, plaque", "brassiere, bra, bandeau", "breakwater, groin, groyne, mole, bulwark, seawall, jetty", "breastplate, aegis, egis", "broom", "bucket, pail", "buckle", "bulletproof vest", "bullet train, bullet", "butcher shop, meat market", "cab, hack, taxi, taxicab", "caldron, cauldron", "candle, taper, wax light", "cannon", "canoe", "can opener, tin opener", "cardigan", "car mirror", "carousel, carrousel, merry-go-round, roundabout, whirligig", "carpenter's kit, tool kit", "carton", "car wheel", "cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, atm", "cassette", "cassette player", "castle", "catamaran", "cd player", "cello, 
violoncello", "cellular telephone, cellular phone, cellphone, cell, mobile phone", "chain", "chainlink fence", "chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour", "chain saw, chainsaw", "chest", "chiffonier, commode", "chime, bell, gong", "china cabinet, china closet", "christmas stocking", "church, church building", "cinema, movie theater, movie theatre, movie house, picture palace", "cleaver, meat cleaver, chopper", "cliff dwelling", "cloak", "clog, geta, patten, sabot", "cocktail shaker", "coffee mug", "coffeepot", "coil, spiral, volute, whorl, helix", "combination lock", "computer keyboard, keypad", "confectionery, confectionary, candy store", "container ship, containership, container vessel", "convertible", "corkscrew, bottle screw", "cornet, horn, trumpet, trump", "cowboy boot", "cowboy hat, ten-gallon hat", "cradle", "crane", "crash helmet", "crate", "crib, cot", "crock pot", "croquet ball", "crutch", "cuirass", "dam, dike, dyke", "desk", "desktop computer", "dial telephone, dial phone", "diaper, nappy, napkin", "digital clock", "digital watch", "dining table, board", "dishrag, dishcloth", "dishwasher, dish washer, dishwashing machine", "disk brake, disc brake", "dock, dockage, docking facility", "dogsled, dog sled, dog sleigh", "dome", "doormat, welcome mat", "drilling platform, offshore rig", "drum, membranophone, tympan", "drumstick", "dumbbell", "dutch oven", "electric fan, blower", "electric guitar", "electric locomotive", "entertainment center", "envelope", "espresso maker", "face powder", "feather boa, boa", "file, file cabinet, filing cabinet", "fireboat", "fire engine, fire truck", "fire screen, fireguard", "flagpole, flagstaff", "flute, transverse flute", "folding chair", "football helmet", "forklift", "fountain", "fountain pen", "four-poster", "freight car", "french horn, horn", "frying pan, frypan, skillet", "fur coat", "garbage truck, dustcart", "gasmask, respirator, gas helmet", "gas pump, gasoline pump, petrol 
pump, island dispenser", "goblet", "go-kart", "golf ball", "golfcart, golf cart", "gondola", "gong, tam-tam", "gown", "grand piano, grand", "greenhouse, nursery, glasshouse", "grille, radiator grille", "grocery store, grocery, food market, market", "guillotine", "hair slide", "hair spray", "half track", "hammer", "hamper", "hand blower, blow dryer, blow drier, hair dryer, hair drier", "hand-held computer, hand-held microcomputer", "handkerchief, hankie, hanky, hankey", "hard disc, hard disk, fixed disk", "harmonica, mouth organ, harp, mouth harp", "harp", "harvester, reaper", "hatchet", "holster", "home theater, home theatre", "honeycomb", "hook, claw", "hoopskirt, crinoline", "horizontal bar, high bar", "horse cart, horse-cart", "hourglass", "ipod", "iron, smoothing iron", "jack-o'-lantern", "jean, blue jean, denim", "jeep, landrover", "jersey, t-shirt, tee shirt", "jigsaw puzzle", "jinrikisha, ricksha, rickshaw", "joystick", "kimono", "knee pad", "knot", "lab coat, laboratory coat", "ladle", "lampshade, lamp shade", "laptop, laptop computer", "lawn mower, mower", "lens cap, lens cover", "letter opener, paper knife, paperknife", "library", "lifeboat", "lighter, light, igniter, ignitor", "limousine, limo", "liner, ocean liner", "lipstick, lip rouge", "loafer", "lotion", "loudspeaker, speaker, speaker unit, loudspeaker system, speaker system", "loupe, jeweler's loupe", "lumbermill, sawmill", "magnetic compass", "mailbag, postbag", "mailbox, letter box", "maillot", "maillot, tank suit", "manhole cover", "maraca", "marimba, xylophone", "mask", "matchstick", "maypole", "maze, labyrinth", "measuring cup", "medicine chest, medicine cabinet", "megalith, megalithic structure", "microphone, mike", "microwave, microwave oven", "military uniform", "milk can", "minibus", "miniskirt, mini", "minivan", "missile", "mitten", "mixing bowl", "mobile home, manufactured home", "model t", "modem", "monastery", "monitor", "moped", "mortar", "mortarboard", "mosque", "mosquito net", 
"motor scooter, scooter", "mountain bike, all-terrain bike, off-roader", "mountain tent", "mouse, computer mouse", "mousetrap", "moving van", "muzzle", "nail", "neck brace", "necklace", "nipple", "notebook, notebook computer", "obelisk", "oboe, hautboy, hautbois", "ocarina, sweet potato", "odometer, hodometer, mileometer, milometer", "oil filter", "organ, pipe organ", "oscilloscope, scope, cathode-ray oscilloscope, cro", "overskirt", "oxcart", "oxygen mask", "packet", "paddle, boat paddle", "paddlewheel, paddle wheel", "padlock", "paintbrush", "pajama, pyjama, pj's, jammies", "palace", "panpipe, pandean pipe, syrinx", "paper towel", "parachute, chute", "parallel bars, bars", "park bench", "parking meter", "passenger car, coach, carriage", "patio, terrace", "pay-phone, pay-station", "pedestal, plinth, footstall", "pencil box, pencil case", "pencil sharpener", "perfume, essence", "petri dish", "photocopier", "pick, plectrum, plectron", "pickelhaube", "picket fence, paling", "pickup, pickup truck", "pier", "piggy bank, penny bank", "pill bottle", "pillow", "ping-pong ball", "pinwheel", "pirate, pirate ship", "pitcher, ewer", "plane, carpenter's plane, woodworking plane", "planetarium", "plastic bag", "plate rack", "plow, plough", "plunger, plumber's helper", "polaroid camera, polaroid land camera", "pole", "police van, police wagon, paddy wagon, patrol wagon, wagon, black maria", "poncho", "pool table, billiard table, snooker table", "pop bottle, soda bottle", "pot, flowerpot", "potter's wheel", "power drill", "prayer rug, prayer mat", "printer", "prison, prison house", "projectile, missile", "projector", "puck, hockey puck", "punching bag, punch bag, punching ball, punchball", "purse", "quill, quill pen", "quilt, comforter, comfort, puff", "racer, race car, racing car", "racket, racquet", "radiator", "radio, wireless", "radio telescope, radio reflector", "rain barrel", "recreational vehicle, rv, r.v.", "reel", "reflex camera", "refrigerator, icebox", "remote control, 
remote", "restaurant, eating house, eating place, eatery", "revolver, six-gun, six-shooter", "rifle", "rocking chair, rocker", "rotisserie", "rubber eraser, rubber, pencil eraser", "rugby ball", "rule, ruler", "running shoe", "safe", "safety pin", "saltshaker, salt shaker", "sandal", "sarong", "sax, saxophone", "scabbard", "scale, weighing machine", "school bus", "schooner", "scoreboard", "screen, crt screen", "screw", "screwdriver", "seat belt, seatbelt", "sewing machine", "shield, buckler", "shoe shop, shoe-shop, shoe store", "shoji", "shopping basket", "shopping cart", "shovel", "shower cap", "shower curtain", "ski", "ski mask", "sleeping bag", "slide rule, slipstick", "sliding door", "slot, one-armed bandit", "snorkel", "snowmobile", "snowplow, snowplough", "soap dispenser", "soccer ball", "sock", "solar dish, solar collector, solar furnace", "sombrero", "soup bowl", "space bar", "space heater", "space shuttle", "spatula", "speedboat", "spider web, spider's web", "spindle", "sports car, sport car", "spotlight, spot", "stage", "steam locomotive", "steel arch bridge", "steel drum", "stethoscope", "stole", "stone wall", "stopwatch, stop watch", "stove", "strainer", "streetcar, tram, tramcar, trolley, trolley car", "stretcher", "studio couch, day bed", "stupa, tope", "submarine, pigboat, sub, u-boat", "suit, suit of clothes", "sundial", "sunglass", "sunglasses, dark glasses, shades", "sunscreen, sunblock, sun blocker", "suspension bridge", "swab, swob, mop", "sweatshirt", "swimming trunks, bathing trunks", "swing", "switch, electric switch, electrical switch", "syringe", "table lamp", "tank, army tank, armored combat vehicle, armoured combat vehicle", "tape player", "teapot", "teddy, teddy bear", "television, television system", "tennis ball", "thatch, thatched roof", "theater curtain, theatre curtain", "thimble", "thresher, thrasher, threshing machine", "throne", "tile roof", "toaster", "tobacco shop, tobacconist shop, tobacconist", "toilet seat", "torch", "totem 
pole", "tow truck, tow car, wrecker", "toyshop", "tractor", "trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi", "tray", "trench coat", "tricycle, trike, velocipede", "trimaran", "tripod", "triumphal arch", "trolleybus, trolley coach, trackless trolley", "trombone", "tub, vat", "turnstile", "typewriter keyboard", "umbrella", "unicycle, monocycle", "upright, upright piano", "vacuum, vacuum cleaner", "vase", "vault", "velvet", "vending machine", "vestment", "viaduct", "violin, fiddle", "volleyball", "waffle iron", "wall clock", "wallet, billfold, notecase, pocketbook", "wardrobe, closet, press", "warplane, military plane", "washbasin, handbasin, washbowl, lavabo, wash-hand basin", "washer, automatic washer, washing machine", "water bottle", "water jug", "water tower", "whiskey jug", "whistle", "wig", "window screen", "window shade", "windsor tie", "wine bottle", "wing", "wok", "wooden spoon", "wool, woolen, woollen", "worm fence, snake fence, snake-rail fence, virginia fence", "wreck", "yawl", "yurt", "web site, website, internet site, site", "comic book", "crossword puzzle, crossword", "street sign", "traffic light, traffic signal, stoplight", "book jacket, dust cover, dust jacket, dust wrapper", "menu", "plate", "guacamole", "consomme", "hot pot, hotpot", "trifle", "ice cream, icecream", "ice lolly, lolly, lollipop, popsicle", "french loaf", "bagel, beigel", "pretzel", "cheeseburger", "hotdog, hot dog, red hot", "mashed potato", "head cabbage", "broccoli", "cauliflower", "zucchini, courgette", "spaghetti squash", "acorn squash", "butternut squash", "cucumber, cuke", "artichoke, globe artichoke", "bell pepper", "cardoon", "mushroom", "granny smith", "strawberry", "orange", "lemon", "fig", "pineapple, ananas", "banana", "jackfruit, jak, jack", "custard apple", "pomegranate", "hay", "carbonara", "chocolate sauce, chocolate syrup", "dough", "meat loaf, meatloaf", "pizza, pizza pie", "potpie", "burrito", "red wine", "espresso", "cup", "eggnog", 
"alp", "bubble", "cliff, drop, drop-off", "coral reef", "geyser", "lakeside, lakeshore", "promontory, headland, head, foreland", "sandbar, sand bar", "seashore, coast, seacoast, sea-coast", "valley, vale", "volcano", "ballplayer, baseball player", "groom, bridegroom", "scuba diver", "rapeseed", "daisy", "yellow lady's slipper, yellow lady-slipper, cypripedium calceolus, cypripedium parviflorum", "corn", "acorn", "hip, rose hip, rosehip", "buckeye, horse chestnut, conker", "coral fungus", "agaric", "gyromitra", "stinkhorn, carrion fungus", "earthstar", "hen-of-the-woods, hen of the woods, polyporus frondosus, grifola frondosa", "bolete", "ear, spike, capitulum", "toilet tissue, toilet paper, bathroom tissue" ]