| Column | Type | Min length | Max length |
|:-------------|:-------|:----------:|:----------:|
| model_id | string | 7 | 105 |
| model_card | string | 1 | 130k |
| model_labels | list | 2 | 80k |
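Each record pairs a Hub model id with the raw README ("model card") text and the model's label list. For orientation, a minimal sketch of iterating such a dump with the `datasets` library; the repository id `user/model-cards-dump` is hypothetical, since the dump's actual source is not named here:

```python
from datasets import load_dataset

# "user/model-cards-dump" is a hypothetical repo id; the dump's source is not named.
ds = load_dataset("user/model-cards-dump", split="train")

for row in ds.select(range(3)):
    print(row["model_id"])                     # e.g. "dima806/gemstones_image_detection"
    print(row["model_card"][:80], "...")       # README text, 1 to ~130k characters
    print(len(row["model_labels"]), "labels")  # label lists have 2 to ~80k entries
```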
asadimtiazmalik/my_traffic_dataset_model
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my_traffic_dataset_model This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.7616 - Accuracy: 0.6230 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 3.5388 | 0.99 | 71 | 3.4594 | 0.4101 | | 2.9917 | 1.99 | 143 | 2.9101 | 0.5777 | | 2.7402 | 2.97 | 213 | 2.7616 | 0.6230 | ### Framework versions - Transformers 4.32.1 - Pytorch 2.0.1+cu118 - Tokenizers 0.13.3
[ "all_motor_vehicle_prohibited", "axle_load_limit", "compulsary_keep_left", "compulsary_keep_right", "compulsary_minimum_speed", "compulsary_sound_horn", "compulsary_turn_left", "compulsary_turn_left_ahead", "compulsary_turn_right", "compulsary_turn_right_ahead", "cross_road", "cycle_crossing", "barrier_ahead", "cycle_prohibited", "dangerous_dip", "direction", "falling_rocks", "ferry", "gap_in_median", "give_way", "guarded_level_crossing", "handcart_prohibited", "height_limit", "bullock_and_handcart_prohibited", "horn_prohibited", "hump_or_rough_road", "left_hair_pin_bend", "left_hand_curve", "left_reverse_bend", "left_turn_prohibited", "length_limit", "load_limit", "loose_gravel", "men_at_work", "bullock_prohibited", "narrow_bridge", "narrow_road_ahead", "no_entry", "no_parking", "no_stopping_or_standing", "overtaking_prohibited", "pass_either_side", "pedestrian_crossing", "pedestrian_prohibited", "priority_for_oncoming_vehicles", "cattle", "quay_side_or_river_bank", "restriction_ends", "right_hair_pin_bend", "right_hand_curve", "right_reverse_bend", "right_turn_prohibited", "road_widens_ahead", "roundabout", "school_ahead", "side_road_left", "compulsary_ahead", "side_road_right", "slippery_road", "speed_limit_15", "speed_limit_20", "speed_limit_30", "speed_limit_40", "speed_limit_5", "speed_limit_50", "speed_limit_60", "speed_limit_70", "compulsary_ahead_or_turn_left", "speed_limit_80", "staggered_intersection", "steep_ascent", "steep_descent", "stop", "straight_prohibited", "tonga_prohibited", "traffic_signal", "truck_prohibited", "turn_right", "compulsary_ahead_or_turn_right", "t_intersection", "unguarded_level_crossing", "u_turn_prohibited", "width_limit", "y_intersection", "compulsary_cycle_track" ]
dima806/gemstones_image_detection
See https://www.kaggle.com/code/dima806/gemstones-image-detection-vit for more details.
[ "spessartite", "tigers eye", "blue lace agate", "sapphire blue", "sapphire yellow", "pyrite", "scapolite", "turquoise", "opal", "grossular", "tsavorite", "larimar", "moonstone", "serpentine", "jade", "onyx red", "ametrine", "coral", "amazonite", "chrysoberyl", "malachite", "fluorite", "amethyst", "prehnite", "kunzite", "diamond", "carnelian", "quartz smoky", "jasper", "rhodolite", "pyrope", "pearl", "peridot", "dumortierite", "almandine", "aventurine yellow", "sphene", "hessonite", "amber", "tanzanite", "spinel", "aventurine green", "hiddenite", "lapis lazuli", "tourmaline", "alexandrite", "danburite", "sapphire purple", "spodumene", "sodalite", "garnet red", "chalcedony", "kyanite", "rhodochrosite", "goshenite", "variscite", "benitoite", "bixbite", "zircon", "diaspore", "onyx green", "quartz rutilated", "sunstone", "chrome diopside", "aquamarine", "iolite", "emerald", "rhodonite", "cats eye", "beryl golden", "quartz beer", "labradorite", "andalusite", "andradite", "ruby", "quartz rose", "onyx black", "chalcedony blue", "bloodstone", "chrysoprase", "sapphire pink", "morganite", "citrine", "quartz lemon", "zoisite", "chrysocolla", "topaz" ]
jolual2747/vit-model-jose-alcocer
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-model-jose-alcocer This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.0879 - Accuracy: 0.7048 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:------:|:----:|:---------------:|:--------:| | 2.197 | 1.6287 | 500 | 1.4172 | 0.6444 | | 0.851 | 3.2573 | 1000 | 1.0879 | 0.7048 | ### Framework versions - Transformers 4.40.0 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.19.1
[ "active accessories", "activewear", "briefcases", "candles and home fragrance", "cardholders", "clutch bags", "coats", "cross-body bags", "cufflinks, pins and clips", "cushions and throws", "denim", "dresses", "all-in-ones", "espadrilles", "fashion jewelry", "fine jewelry", "flats", "formal shoes", "glasses", "gloves", "hair accessories", "hand-held bags", "hats", "backpacks", "heels", "home accessories", "jackets", "jeans", "jewelry", "key rings", "knitwear", "lingerie and nightwear", "loungewear", "luggage and travel bags", "beach accessories", "matching sets", "mini bags", "pants", "pocket squares", "polo shirts", "sandals", "scarves", "shirts", "shorts", "shoulder bags", "beachwear", "skirts", "slippers", "sneakers", "socks", "suits", "sunglasses", "sweats", "swimwear", "t-shirts", "tabletop", "belts", "technology", "ties", "top-handle bags", "tops", "tote bags", "travel accessories", "travel bags", "trousers", "underwear and nightwear", "wallets", "blazers", "wash bags", "watches", "boots", "bow ties" ]
zpschang/my_awesome_food_model
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my_awesome_food_model This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the food101 dataset. It achieves the following results on the evaluation set: - Loss: 1.6523 - Accuracy: 0.886 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 2.7479 | 0.99 | 62 | 2.5911 | 0.799 | | 1.8771 | 2.0 | 125 | 1.8183 | 0.87 | | 1.6422 | 2.98 | 186 | 1.6523 | 0.886 | ### Framework versions - Transformers 4.31.0 - Pytorch 2.0.1+cu117 - Datasets 2.14.4 - Tokenizers 0.13.3
[ "apple_pie", "baby_back_ribs", "bruschetta", "waffles", "caesar_salad", "cannoli", "caprese_salad", "carrot_cake", "ceviche", "cheesecake", "cheese_plate", "chicken_curry", "chicken_quesadilla", "baklava", "chicken_wings", "chocolate_cake", "chocolate_mousse", "churros", "clam_chowder", "club_sandwich", "crab_cakes", "creme_brulee", "croque_madame", "cup_cakes", "beef_carpaccio", "deviled_eggs", "donuts", "dumplings", "edamame", "eggs_benedict", "escargots", "falafel", "filet_mignon", "fish_and_chips", "foie_gras", "beef_tartare", "french_fries", "french_onion_soup", "french_toast", "fried_calamari", "fried_rice", "frozen_yogurt", "garlic_bread", "gnocchi", "greek_salad", "grilled_cheese_sandwich", "beet_salad", "grilled_salmon", "guacamole", "gyoza", "hamburger", "hot_and_sour_soup", "hot_dog", "huevos_rancheros", "hummus", "ice_cream", "lasagna", "beignets", "lobster_bisque", "lobster_roll_sandwich", "macaroni_and_cheese", "macarons", "miso_soup", "mussels", "nachos", "omelette", "onion_rings", "oysters", "bibimbap", "pad_thai", "paella", "pancakes", "panna_cotta", "peking_duck", "pho", "pizza", "pork_chop", "poutine", "prime_rib", "bread_pudding", "pulled_pork_sandwich", "ramen", "ravioli", "red_velvet_cake", "risotto", "samosa", "sashimi", "scallops", "seaweed_salad", "shrimp_and_grits", "breakfast_burrito", "spaghetti_bolognese", "spaghetti_carbonara", "spring_rolls", "steak", "strawberry_shortcake", "sushi", "tacos", "takoyaki", "tiramisu", "tuna_tartare" ]
jorgeduardo13/mobilevit-small-finetuned
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mobilevit-small-finetuned This model is a fine-tuned version of [apple/mobilevit-small](https://huggingface.co/apple/mobilevit-small) on the cifar10 dataset. It achieves the following results on the evaluation set: - Loss: 0.6787 - Accuracy: 0.787 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.357 | 1.0 | 351 | 1.0780 | 0.6832 | | 1.068 | 2.0 | 703 | 0.7430 | 0.7722 | | 1.0103 | 2.99 | 1053 | 0.6787 | 0.787 | ### Framework versions - Transformers 4.28.0 - Pytorch 2.0.1+cu118 - Datasets 2.14.4 - Tokenizers 0.13.3
[ "airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck" ]
kwc20140710/my_awesome_food_model
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my_awesome_food_model This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the food101 dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Framework versions - Transformers 4.28.0 - Pytorch 2.0.1+cu118 - Datasets 2.14.4 - Tokenizers 0.13.3
[ "apple_pie", "baby_back_ribs", "bruschetta", "waffles", "caesar_salad", "cannoli", "caprese_salad", "carrot_cake", "ceviche", "cheesecake", "cheese_plate", "chicken_curry", "chicken_quesadilla", "baklava", "chicken_wings", "chocolate_cake", "chocolate_mousse", "churros", "clam_chowder", "club_sandwich", "crab_cakes", "creme_brulee", "croque_madame", "cup_cakes", "beef_carpaccio", "deviled_eggs", "donuts", "dumplings", "edamame", "eggs_benedict", "escargots", "falafel", "filet_mignon", "fish_and_chips", "foie_gras", "beef_tartare", "french_fries", "french_onion_soup", "french_toast", "fried_calamari", "fried_rice", "frozen_yogurt", "garlic_bread", "gnocchi", "greek_salad", "grilled_cheese_sandwich", "beet_salad", "grilled_salmon", "guacamole", "gyoza", "hamburger", "hot_and_sour_soup", "hot_dog", "huevos_rancheros", "hummus", "ice_cream", "lasagna", "beignets", "lobster_bisque", "lobster_roll_sandwich", "macaroni_and_cheese", "macarons", "miso_soup", "mussels", "nachos", "omelette", "onion_rings", "oysters", "bibimbap", "pad_thai", "paella", "pancakes", "panna_cotta", "peking_duck", "pho", "pizza", "pork_chop", "poutine", "prime_rib", "bread_pudding", "pulled_pork_sandwich", "ramen", "ravioli", "red_velvet_cake", "risotto", "samosa", "sashimi", "scallops", "seaweed_salad", "shrimp_and_grits", "breakfast_burrito", "spaghetti_bolognese", "spaghetti_carbonara", "spring_rolls", "steak", "strawberry_shortcake", "sushi", "tacos", "takoyaki", "tiramisu", "tuna_tartare" ]
kwc20140710/food_classifier
<!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # kwc20140710/food_classifier This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.4019 - Validation Loss: 0.3216 - Train Accuracy: 0.926 - Epoch: 4 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 3e-05, 'decay_steps': 20000, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Train Accuracy | Epoch | |:----------:|:---------------:|:--------------:|:-----:| | 2.7940 | 1.6967 | 0.797 | 0 | | 1.2507 | 0.9017 | 0.844 | 1 | | 0.7152 | 0.5297 | 0.902 | 2 | | 0.5003 | 0.3919 | 0.915 | 3 | | 0.4019 | 0.3216 | 0.926 | 4 | ### Framework versions - Transformers 4.28.0 - TensorFlow 2.12.0 - Datasets 2.14.4 - Tokenizers 0.13.3
[ "apple_pie", "baby_back_ribs", "bruschetta", "waffles", "caesar_salad", "cannoli", "caprese_salad", "carrot_cake", "ceviche", "cheesecake", "cheese_plate", "chicken_curry", "chicken_quesadilla", "baklava", "chicken_wings", "chocolate_cake", "chocolate_mousse", "churros", "clam_chowder", "club_sandwich", "crab_cakes", "creme_brulee", "croque_madame", "cup_cakes", "beef_carpaccio", "deviled_eggs", "donuts", "dumplings", "edamame", "eggs_benedict", "escargots", "falafel", "filet_mignon", "fish_and_chips", "foie_gras", "beef_tartare", "french_fries", "french_onion_soup", "french_toast", "fried_calamari", "fried_rice", "frozen_yogurt", "garlic_bread", "gnocchi", "greek_salad", "grilled_cheese_sandwich", "beet_salad", "grilled_salmon", "guacamole", "gyoza", "hamburger", "hot_and_sour_soup", "hot_dog", "huevos_rancheros", "hummus", "ice_cream", "lasagna", "beignets", "lobster_bisque", "lobster_roll_sandwich", "macaroni_and_cheese", "macarons", "miso_soup", "mussels", "nachos", "omelette", "onion_rings", "oysters", "bibimbap", "pad_thai", "paella", "pancakes", "panna_cotta", "peking_duck", "pho", "pizza", "pork_chop", "poutine", "prime_rib", "bread_pudding", "pulled_pork_sandwich", "ramen", "ravioli", "red_velvet_cake", "risotto", "samosa", "sashimi", "scallops", "seaweed_salad", "shrimp_and_grits", "breakfast_burrito", "spaghetti_bolognese", "spaghetti_carbonara", "spring_rolls", "steak", "strawberry_shortcake", "sushi", "tacos", "takoyaki", "tiramisu", "tuna_tartare" ]
mrlasagna07/swin-tiny-patch4-window7-224-finetuned-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-eurosat This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0539 - Accuracy: 0.98 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1412 | 0.99 | 70 | 0.1162 | 0.96 | | 0.0913 | 2.0 | 141 | 0.0539 | 0.98 | | 0.0777 | 2.98 | 210 | 0.0571 | 0.976 | ### Framework versions - Transformers 4.34.1 - Pytorch 2.1.0+cu118 - Datasets 2.14.6 - Tokenizers 0.14.1
[ "cloud", "land", "space", "sunburn", "water" ]
dima806/galaxy_type_image_detection
Achieved 78% weighted accuracy for classification between 3 common galaxy types (E, S, SB). See [my Kaggle notebook](https://www.kaggle.com/code/dima806/galaxy-type-image-detection-vit) for more details. ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6449300e3adf50d864095b90/g5Gx6ra0hLYDU6BujnhPJ.png)

```
Classification report:

              precision    recall  f1-score   support

           E     0.7656    0.8848    0.8209     13592
           S     0.7526    0.6685    0.7081     13591
          SB     0.8262    0.7900    0.8077     13591

    accuracy                         0.7811     40774
   macro avg     0.7815    0.7811    0.7789     40774
weighted avg     0.7815    0.7811    0.7789     40774
```
[ "e", "s", "sb" ]
fsuarez/autotrain-image-classification-86974143294
## 📒 image-classification-model This model was trained on the "image-classification" dataset for multi-class classification of website segments. Each segment corresponds to one of six classes of web elements: - **Button**: Identifying interactive buttons that users can click or tap on for various website functions. - **Textfield**: Recognizing text input fields where users can type or enter information. - **Checkbox**: Detecting checkboxes that users can select or deselect to make choices or indicate preferences. - **Radiobutton**: Identifying radio buttons that allow users to choose a single option from a list. - **Tables**: Recognizing tabular data structures that organize information in rows and columns. - **AppBar**: Detecting app bars or navigation bars typically found at the top of web pages, often containing menus, search bars, or branding elements. This training equips the model to classify these web elements accurately. # 🧪 Dataset Content The dataset is structured to facilitate the analysis of website components. It includes the object types listed above, each organized into its own category for precise classification. | Web Element Category | Number of images | |----------------------|--------------------| | Button | 2934 | | Textfield | 100 | | Checkbox | 422 | | Radiobutton | 466 | | Tables | 100 | | AppBar | 100 | # 🤗 Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 86974143294 - CO2 Emissions (in grams): 1.6524 ## 📐 Validation Metrics - Loss: 0.079 - Accuracy: 0.983 - Macro F1: 0.967 - Micro F1: 0.983 - Weighted F1: 0.983 - Macro Precision: 0.971 - Micro Precision: 0.983 - Weighted Precision: 0.983 - Macro Recall: 0.964 - Micro Recall: 0.983 - Weighted Recall: 0.983
[ "appbar", "button", "checkbox", "radiobutton", "table", "textfield" ]
HorcruxNo13/dit-base-finetuned-rvlcdip
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # dit-base-finetuned-rvlcdip This model is a fine-tuned version of [microsoft/dit-base-finetuned-rvlcdip](https://huggingface.co/microsoft/dit-base-finetuned-rvlcdip) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.5940 - Accuracy: 0.87 - Precision: 0.7623 - Recall: 0.87 - F1 Score: 0.8126 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 15 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 Score | |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:--------:| | No log | 1.0 | 4 | 0.6006 | 0.8708 | 0.7584 | 0.8708 | 0.8107 | | No log | 2.0 | 8 | 0.5169 | 0.8708 | 0.7584 | 0.8708 | 0.8107 | | No log | 3.0 | 12 | 0.4027 | 0.8708 | 0.7584 | 0.8708 | 0.8107 | | 0.5427 | 4.0 | 16 | 0.3865 | 0.8708 | 0.7584 | 0.8708 | 0.8107 | | 0.5427 | 5.0 | 20 | 0.3894 | 0.8708 | 0.7584 | 0.8708 | 0.8107 | | 0.5427 | 6.0 | 24 | 0.3729 | 0.8708 | 0.7584 | 0.8708 | 0.8107 | | 0.5427 | 7.0 | 28 | 0.3707 | 0.8708 | 0.7584 | 0.8708 | 0.8107 | | 0.4458 | 8.0 | 32 | 0.3790 | 0.8708 | 0.7584 | 0.8708 | 0.8107 | | 0.4458 | 9.0 | 36 | 0.3504 | 0.8708 | 0.7584 | 0.8708 | 0.8107 | | 0.4458 | 10.0 | 40 | 0.3356 | 0.8708 | 0.7584 | 0.8708 | 0.8107 | | 0.4458 | 11.0 | 44 | 0.4082 | 0.8708 | 0.7584 | 0.8708 | 0.8107 | | 0.4369 | 12.0 | 48 | 0.3455 | 0.8708 | 0.7584 | 0.8708 | 0.8107 | | 0.4369 | 13.0 | 52 | 0.3074 | 0.8708 | 0.7584 | 0.8708 | 0.8107 | | 0.4369 | 14.0 | 56 | 0.3097 | 0.8708 | 0.7584 | 0.8708 | 0.8107 | | 0.4109 | 15.0 | 60 | 0.3173 | 0.8708 | 0.7584 | 0.8708 | 0.8107 | ### Framework versions - Transformers 4.33.2 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "normal", "abnormal" ]
HorcruxNo13/vit-base-patch16-224
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-patch16-224 This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.6740 - Accuracy: 0.79 - Precision: 0.7955 - Recall: 0.79 - F1 Score: 0.7923 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 Score | |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:--------:| | No log | 1.0 | 4 | 0.5895 | 0.725 | 0.5256 | 0.725 | 0.6094 | | No log | 2.0 | 8 | 0.5737 | 0.725 | 0.5256 | 0.725 | 0.6094 | | No log | 3.0 | 12 | 0.5746 | 0.7333 | 0.6978 | 0.7333 | 0.6589 | | No log | 4.0 | 16 | 0.5449 | 0.7292 | 0.7126 | 0.7292 | 0.6263 | | No log | 5.0 | 20 | 0.5943 | 0.7208 | 0.7362 | 0.7208 | 0.7270 | | No log | 6.0 | 24 | 0.5124 | 0.75 | 0.7360 | 0.75 | 0.6895 | | No log | 7.0 | 28 | 0.6057 | 0.6625 | 0.7301 | 0.6625 | 0.6797 | | No log | 8.0 | 32 | 0.5059 | 0.7583 | 0.7376 | 0.7583 | 0.7214 | | No log | 9.0 | 36 | 0.5734 | 0.7125 | 0.7474 | 0.7125 | 0.7237 | | No log | 10.0 | 40 | 0.5069 | 0.7458 | 0.7182 | 0.7458 | 0.7116 | | No log | 11.0 | 44 | 0.5135 | 0.775 | 0.7659 | 0.775 | 0.7689 | | No log | 12.0 | 48 | 0.4943 | 0.775 | 0.7601 | 0.775 | 0.7610 | | 0.5275 | 13.0 | 52 | 0.5654 | 0.7458 | 0.7790 | 0.7458 | 0.7557 | | 0.5275 | 14.0 | 56 | 0.5257 | 0.7625 | 0.7636 | 0.7625 | 0.7631 | | 0.5275 | 15.0 | 60 | 0.5107 | 0.7875 | 0.7813 | 0.7875 | 0.7836 | | 0.5275 | 16.0 | 64 | 0.5514 | 0.7333 | 0.7655 | 0.7333 | 0.7434 | | 0.5275 | 17.0 | 68 | 0.5004 | 0.7833 | 0.7698 | 0.7833 | 0.7699 | | 0.5275 | 18.0 | 72 | 0.5999 | 0.7125 | 0.7738 | 0.7125 | 0.7269 | | 0.5275 | 19.0 | 76 | 0.4975 | 0.7667 | 0.7554 | 0.7667 | 0.7589 | | 0.5275 | 20.0 | 80 | 0.5120 | 0.7917 | 0.7981 | 0.7917 | 0.7944 | | 0.5275 | 21.0 | 84 | 0.5203 | 0.7833 | 0.7876 | 0.7833 | 0.7853 | | 0.5275 | 22.0 | 88 | 0.5304 | 0.8042 | 0.8051 | 0.8042 | 0.8046 | | 0.5275 | 23.0 | 92 | 0.5475 | 0.825 | 0.825 | 0.825 | 0.8250 | | 0.5275 | 24.0 | 96 | 0.5757 | 0.7458 | 0.7661 | 0.7458 | 0.7531 | | 0.2422 | 25.0 | 100 | 0.5669 | 0.7875 | 0.7829 | 0.7875 | 0.7848 | | 0.2422 | 26.0 | 104 | 0.5489 | 0.7958 | 0.7931 | 0.7958 | 0.7943 | | 0.2422 | 27.0 | 108 | 0.5372 | 0.8 | 0.7982 | 0.8 | 0.7990 | | 0.2422 | 28.0 | 112 | 0.5500 | 0.8208 | 0.8160 | 0.8208 | 0.8176 | | 0.2422 | 29.0 | 116 | 0.5682 | 0.8042 | 0.8033 | 0.8042 | 0.8037 | | 0.2422 | 30.0 | 120 | 0.5899 | 0.8083 | 0.8050 | 0.8083 | 0.8064 | | 0.2422 | 31.0 | 124 | 0.6217 | 0.8 | 0.8063 | 0.8 | 0.8026 | | 0.2422 | 32.0 | 128 | 0.6063 | 0.8125 | 0.8053 | 0.8125 | 0.8068 | | 0.2422 | 33.0 | 132 | 0.5843 | 0.8042 | 0.8033 | 0.8042 | 0.8037 | | 0.2422 | 34.0 | 136 | 0.6020 | 0.8125 | 0.8073 | 0.8125 | 0.8091 | | 0.2422 | 35.0 | 
140 | 0.6180 | 0.8042 | 0.8092 | 0.8042 | 0.8063 | | 0.2422 | 36.0 | 144 | 0.6287 | 0.8208 | 0.8171 | 0.8208 | 0.8186 | | 0.2422 | 37.0 | 148 | 0.6231 | 0.825 | 0.8234 | 0.825 | 0.8242 | | 0.0631 | 38.0 | 152 | 0.6260 | 0.8292 | 0.8300 | 0.8292 | 0.8296 | | 0.0631 | 39.0 | 156 | 0.6278 | 0.8333 | 0.8294 | 0.8333 | 0.8308 | | 0.0631 | 40.0 | 160 | 0.6325 | 0.8208 | 0.8200 | 0.8208 | 0.8204 | | 0.0631 | 41.0 | 164 | 0.6370 | 0.8083 | 0.8013 | 0.8083 | 0.8032 | | 0.0631 | 42.0 | 168 | 0.6371 | 0.8125 | 0.8100 | 0.8125 | 0.8111 | | 0.0631 | 43.0 | 172 | 0.6404 | 0.8042 | 0.8016 | 0.8042 | 0.8027 | | 0.0631 | 44.0 | 176 | 0.6640 | 0.8292 | 0.8227 | 0.8292 | 0.8229 | | 0.0631 | 45.0 | 180 | 0.6636 | 0.8208 | 0.8185 | 0.8208 | 0.8195 | | 0.0631 | 46.0 | 184 | 0.6826 | 0.8083 | 0.8122 | 0.8083 | 0.8100 | | 0.0631 | 47.0 | 188 | 0.6756 | 0.8208 | 0.8185 | 0.8208 | 0.8195 | | 0.0631 | 48.0 | 192 | 0.6695 | 0.8292 | 0.8246 | 0.8292 | 0.8261 | | 0.0631 | 49.0 | 196 | 0.6669 | 0.825 | 0.8198 | 0.825 | 0.8213 | | 0.0264 | 50.0 | 200 | 0.6658 | 0.825 | 0.8198 | 0.825 | 0.8213 | ### Framework versions - Transformers 4.33.3 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "normal", "abnormal" ]
HorcruxNo13/swinv2-tiny-patch4-window8-256
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swinv2-tiny-patch4-window8-256 This model is a fine-tuned version of [microsoft/swinv2-tiny-patch4-window8-256](https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 1.4461 - Accuracy: 0.73 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 1.0 | 8 | 1.3942 | 0.7333 | | 4.8462 | 2.0 | 16 | 0.5923 | 0.7333 | | 0.8566 | 3.0 | 24 | 0.6450 | 0.7333 | ### Framework versions - Transformers 4.32.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.4 - Tokenizers 0.13.3
[ "tench, tinca tinca", "goldfish, carassius auratus", "great white shark, white shark, man-eater, man-eating shark, carcharodon carcharias", "tiger shark, galeocerdo cuvieri", "hammerhead, hammerhead shark", "electric ray, crampfish, numbfish, torpedo", "stingray", "cock", "hen", "ostrich, struthio camelus", "brambling, fringilla montifringilla", "goldfinch, carduelis carduelis", "house finch, linnet, carpodacus mexicanus", "junco, snowbird", "indigo bunting, indigo finch, indigo bird, passerina cyanea", "robin, american robin, turdus migratorius", "bulbul", "jay", "magpie", "chickadee", "water ouzel, dipper", "kite", "bald eagle, american eagle, haliaeetus leucocephalus", "vulture", "great grey owl, great gray owl, strix nebulosa", "european fire salamander, salamandra salamandra", "common newt, triturus vulgaris", "eft", "spotted salamander, ambystoma maculatum", "axolotl, mud puppy, ambystoma mexicanum", "bullfrog, rana catesbeiana", "tree frog, tree-frog", "tailed frog, bell toad, ribbed toad, tailed toad, ascaphus trui", "loggerhead, loggerhead turtle, caretta caretta", "leatherback turtle, leatherback, leathery turtle, dermochelys coriacea", "mud turtle", "terrapin", "box turtle, box tortoise", "banded gecko", "common iguana, iguana, iguana iguana", "american chameleon, anole, anolis carolinensis", "whiptail, whiptail lizard", "agama", "frilled lizard, chlamydosaurus kingi", "alligator lizard", "gila monster, heloderma suspectum", "green lizard, lacerta viridis", "african chameleon, chamaeleo chamaeleon", "komodo dragon, komodo lizard, dragon lizard, giant lizard, varanus komodoensis", "african crocodile, nile crocodile, crocodylus niloticus", "american alligator, alligator mississipiensis", "triceratops", "thunder snake, worm snake, carphophis amoenus", "ringneck snake, ring-necked snake, ring snake", "hognose snake, puff adder, sand viper", "green snake, grass snake", "king snake, kingsnake", "garter snake, grass snake", "water snake", "vine snake", "night snake, hypsiglena torquata", "boa constrictor, constrictor constrictor", "rock python, rock snake, python sebae", "indian cobra, naja naja", "green mamba", "sea snake", "horned viper, cerastes, sand viper, horned asp, cerastes cornutus", "diamondback, diamondback rattlesnake, crotalus adamanteus", "sidewinder, horned rattlesnake, crotalus cerastes", "trilobite", "harvestman, daddy longlegs, phalangium opilio", "scorpion", "black and gold garden spider, argiope aurantia", "barn spider, araneus cavaticus", "garden spider, aranea diademata", "black widow, latrodectus mactans", "tarantula", "wolf spider, hunting spider", "tick", "centipede", "black grouse", "ptarmigan", "ruffed grouse, partridge, bonasa umbellus", "prairie chicken, prairie grouse, prairie fowl", "peacock", "quail", "partridge", "african grey, african gray, psittacus erithacus", "macaw", "sulphur-crested cockatoo, kakatoe galerita, cacatua galerita", "lorikeet", "coucal", "bee eater", "hornbill", "hummingbird", "jacamar", "toucan", "drake", "red-breasted merganser, mergus serrator", "goose", "black swan, cygnus atratus", "tusker", "echidna, spiny anteater, anteater", "platypus, duckbill, duckbilled platypus, duck-billed platypus, ornithorhynchus anatinus", "wallaby, brush kangaroo", "koala, koala bear, kangaroo bear, native bear, phascolarctos cinereus", "wombat", "jellyfish", "sea anemone, anemone", "brain coral", "flatworm, platyhelminth", "nematode, nematode worm, roundworm", "conch", "snail", "slug", "sea slug, nudibranch", "chiton, coat-of-mail shell, sea 
cradle, polyplacophore", "chambered nautilus, pearly nautilus, nautilus", "dungeness crab, cancer magister", "rock crab, cancer irroratus", "fiddler crab", "king crab, alaska crab, alaskan king crab, alaska king crab, paralithodes camtschatica", "american lobster, northern lobster, maine lobster, homarus americanus", "spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish", "crayfish, crawfish, crawdad, crawdaddy", "hermit crab", "isopod", "white stork, ciconia ciconia", "black stork, ciconia nigra", "spoonbill", "flamingo", "little blue heron, egretta caerulea", "american egret, great white heron, egretta albus", "bittern", "crane", "limpkin, aramus pictus", "european gallinule, porphyrio porphyrio", "american coot, marsh hen, mud hen, water hen, fulica americana", "bustard", "ruddy turnstone, arenaria interpres", "red-backed sandpiper, dunlin, erolia alpina", "redshank, tringa totanus", "dowitcher", "oystercatcher, oyster catcher", "pelican", "king penguin, aptenodytes patagonica", "albatross, mollymawk", "grey whale, gray whale, devilfish, eschrichtius gibbosus, eschrichtius robustus", "killer whale, killer, orca, grampus, sea wolf, orcinus orca", "dugong, dugong dugon", "sea lion", "chihuahua", "japanese spaniel", "maltese dog, maltese terrier, maltese", "pekinese, pekingese, peke", "shih-tzu", "blenheim spaniel", "papillon", "toy terrier", "rhodesian ridgeback", "afghan hound, afghan", "basset, basset hound", "beagle", "bloodhound, sleuthhound", "bluetick", "black-and-tan coonhound", "walker hound, walker foxhound", "english foxhound", "redbone", "borzoi, russian wolfhound", "irish wolfhound", "italian greyhound", "whippet", "ibizan hound, ibizan podenco", "norwegian elkhound, elkhound", "otterhound, otter hound", "saluki, gazelle hound", "scottish deerhound, deerhound", "weimaraner", "staffordshire bullterrier, staffordshire bull terrier", "american staffordshire terrier, staffordshire terrier, american pit bull terrier, pit bull terrier", "bedlington terrier", "border terrier", "kerry blue terrier", "irish terrier", "norfolk terrier", "norwich terrier", "yorkshire terrier", "wire-haired fox terrier", "lakeland terrier", "sealyham terrier, sealyham", "airedale, airedale terrier", "cairn, cairn terrier", "australian terrier", "dandie dinmont, dandie dinmont terrier", "boston bull, boston terrier", "miniature schnauzer", "giant schnauzer", "standard schnauzer", "scotch terrier, scottish terrier, scottie", "tibetan terrier, chrysanthemum dog", "silky terrier, sydney silky", "soft-coated wheaten terrier", "west highland white terrier", "lhasa, lhasa apso", "flat-coated retriever", "curly-coated retriever", "golden retriever", "labrador retriever", "chesapeake bay retriever", "german short-haired pointer", "vizsla, hungarian pointer", "english setter", "irish setter, red setter", "gordon setter", "brittany spaniel", "clumber, clumber spaniel", "english springer, english springer spaniel", "welsh springer spaniel", "cocker spaniel, english cocker spaniel, cocker", "sussex spaniel", "irish water spaniel", "kuvasz", "schipperke", "groenendael", "malinois", "briard", "kelpie", "komondor", "old english sheepdog, bobtail", "shetland sheepdog, shetland sheep dog, shetland", "collie", "border collie", "bouvier des flandres, bouviers des flandres", "rottweiler", "german shepherd, german shepherd dog, german police dog, alsatian", "doberman, doberman pinscher", "miniature pinscher", "greater swiss mountain dog", "bernese mountain dog", "appenzeller", "entlebucher", "boxer", "bull 
mastiff", "tibetan mastiff", "french bulldog", "great dane", "saint bernard, st bernard", "eskimo dog, husky", "malamute, malemute, alaskan malamute", "siberian husky", "dalmatian, coach dog, carriage dog", "affenpinscher, monkey pinscher, monkey dog", "basenji", "pug, pug-dog", "leonberg", "newfoundland, newfoundland dog", "great pyrenees", "samoyed, samoyede", "pomeranian", "chow, chow chow", "keeshond", "brabancon griffon", "pembroke, pembroke welsh corgi", "cardigan, cardigan welsh corgi", "toy poodle", "miniature poodle", "standard poodle", "mexican hairless", "timber wolf, grey wolf, gray wolf, canis lupus", "white wolf, arctic wolf, canis lupus tundrarum", "red wolf, maned wolf, canis rufus, canis niger", "coyote, prairie wolf, brush wolf, canis latrans", "dingo, warrigal, warragal, canis dingo", "dhole, cuon alpinus", "african hunting dog, hyena dog, cape hunting dog, lycaon pictus", "hyena, hyaena", "red fox, vulpes vulpes", "kit fox, vulpes macrotis", "arctic fox, white fox, alopex lagopus", "grey fox, gray fox, urocyon cinereoargenteus", "tabby, tabby cat", "tiger cat", "persian cat", "siamese cat, siamese", "egyptian cat", "cougar, puma, catamount, mountain lion, painter, panther, felis concolor", "lynx, catamount", "leopard, panthera pardus", "snow leopard, ounce, panthera uncia", "jaguar, panther, panthera onca, felis onca", "lion, king of beasts, panthera leo", "tiger, panthera tigris", "cheetah, chetah, acinonyx jubatus", "brown bear, bruin, ursus arctos", "american black bear, black bear, ursus americanus, euarctos americanus", "ice bear, polar bear, ursus maritimus, thalarctos maritimus", "sloth bear, melursus ursinus, ursus ursinus", "mongoose", "meerkat, mierkat", "tiger beetle", "ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle", "ground beetle, carabid beetle", "long-horned beetle, longicorn, longicorn beetle", "leaf beetle, chrysomelid", "dung beetle", "rhinoceros beetle", "weevil", "fly", "bee", "ant, emmet, pismire", "grasshopper, hopper", "cricket", "walking stick, walkingstick, stick insect", "cockroach, roach", "mantis, mantid", "cicada, cicala", "leafhopper", "lacewing, lacewing fly", "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk", "damselfly", "admiral", "ringlet, ringlet butterfly", "monarch, monarch butterfly, milkweed butterfly, danaus plexippus", "cabbage butterfly", "sulphur butterfly, sulfur butterfly", "lycaenid, lycaenid butterfly", "starfish, sea star", "sea urchin", "sea cucumber, holothurian", "wood rabbit, cottontail, cottontail rabbit", "hare", "angora, angora rabbit", "hamster", "porcupine, hedgehog", "fox squirrel, eastern fox squirrel, sciurus niger", "marmot", "beaver", "guinea pig, cavia cobaya", "sorrel", "zebra", "hog, pig, grunter, squealer, sus scrofa", "wild boar, boar, sus scrofa", "warthog", "hippopotamus, hippo, river horse, hippopotamus amphibius", "ox", "water buffalo, water ox, asiatic buffalo, bubalus bubalis", "bison", "ram, tup", "bighorn, bighorn sheep, cimarron, rocky mountain bighorn, rocky mountain sheep, ovis canadensis", "ibex, capra ibex", "hartebeest", "impala, aepyceros melampus", "gazelle", "arabian camel, dromedary, camelus dromedarius", "llama", "weasel", "mink", "polecat, fitch, foulmart, foumart, mustela putorius", "black-footed ferret, ferret, mustela nigripes", "otter", "skunk, polecat, wood pussy", "badger", "armadillo", "three-toed sloth, ai, bradypus tridactylus", "orangutan, orang, orangutang, pongo pygmaeus", "gorilla, 
gorilla gorilla", "chimpanzee, chimp, pan troglodytes", "gibbon, hylobates lar", "siamang, hylobates syndactylus, symphalangus syndactylus", "guenon, guenon monkey", "patas, hussar monkey, erythrocebus patas", "baboon", "macaque", "langur", "colobus, colobus monkey", "proboscis monkey, nasalis larvatus", "marmoset", "capuchin, ringtail, cebus capucinus", "howler monkey, howler", "titi, titi monkey", "spider monkey, ateles geoffroyi", "squirrel monkey, saimiri sciureus", "madagascar cat, ring-tailed lemur, lemur catta", "indri, indris, indri indri, indri brevicaudatus", "indian elephant, elephas maximus", "african elephant, loxodonta africana", "lesser panda, red panda, panda, bear cat, cat bear, ailurus fulgens", "giant panda, panda, panda bear, coon bear, ailuropoda melanoleuca", "barracouta, snoek", "eel", "coho, cohoe, coho salmon, blue jack, silver salmon, oncorhynchus kisutch", "rock beauty, holocanthus tricolor", "anemone fish", "sturgeon", "gar, garfish, garpike, billfish, lepisosteus osseus", "lionfish", "puffer, pufferfish, blowfish, globefish", "abacus", "abaya", "academic gown, academic robe, judge's robe", "accordion, piano accordion, squeeze box", "acoustic guitar", "aircraft carrier, carrier, flattop, attack aircraft carrier", "airliner", "airship, dirigible", "altar", "ambulance", "amphibian, amphibious vehicle", "analog clock", "apiary, bee house", "apron", "ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin", "assault rifle, assault gun", "backpack, back pack, knapsack, packsack, rucksack, haversack", "bakery, bakeshop, bakehouse", "balance beam, beam", "balloon", "ballpoint, ballpoint pen, ballpen, biro", "band aid", "banjo", "bannister, banister, balustrade, balusters, handrail", "barbell", "barber chair", "barbershop", "barn", "barometer", "barrel, cask", "barrow, garden cart, lawn cart, wheelbarrow", "baseball", "basketball", "bassinet", "bassoon", "bathing cap, swimming cap", "bath towel", "bathtub, bathing tub, bath, tub", "beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon", "beacon, lighthouse, beacon light, pharos", "beaker", "bearskin, busby, shako", "beer bottle", "beer glass", "bell cote, bell cot", "bib", "bicycle-built-for-two, tandem bicycle, tandem", "bikini, two-piece", "binder, ring-binder", "binoculars, field glasses, opera glasses", "birdhouse", "boathouse", "bobsled, bobsleigh, bob", "bolo tie, bolo, bola tie, bola", "bonnet, poke bonnet", "bookcase", "bookshop, bookstore, bookstall", "bottlecap", "bow", "bow tie, bow-tie, bowtie", "brass, memorial tablet, plaque", "brassiere, bra, bandeau", "breakwater, groin, groyne, mole, bulwark, seawall, jetty", "breastplate, aegis, egis", "broom", "bucket, pail", "buckle", "bulletproof vest", "bullet train, bullet", "butcher shop, meat market", "cab, hack, taxi, taxicab", "caldron, cauldron", "candle, taper, wax light", "cannon", "canoe", "can opener, tin opener", "cardigan", "car mirror", "carousel, carrousel, merry-go-round, roundabout, whirligig", "carpenter's kit, tool kit", "carton", "car wheel", "cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, atm", "cassette", "cassette player", "castle", "catamaran", "cd player", "cello, violoncello", "cellular telephone, cellular phone, cellphone, cell, mobile phone", "chain", "chainlink fence", "chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour", "chain saw, chainsaw", "chest", "chiffonier, 
commode", "chime, bell, gong", "china cabinet, china closet", "christmas stocking", "church, church building", "cinema, movie theater, movie theatre, movie house, picture palace", "cleaver, meat cleaver, chopper", "cliff dwelling", "cloak", "clog, geta, patten, sabot", "cocktail shaker", "coffee mug", "coffeepot", "coil, spiral, volute, whorl, helix", "combination lock", "computer keyboard, keypad", "confectionery, confectionary, candy store", "container ship, containership, container vessel", "convertible", "corkscrew, bottle screw", "cornet, horn, trumpet, trump", "cowboy boot", "cowboy hat, ten-gallon hat", "cradle", "crane", "crash helmet", "crate", "crib, cot", "crock pot", "croquet ball", "crutch", "cuirass", "dam, dike, dyke", "desk", "desktop computer", "dial telephone, dial phone", "diaper, nappy, napkin", "digital clock", "digital watch", "dining table, board", "dishrag, dishcloth", "dishwasher, dish washer, dishwashing machine", "disk brake, disc brake", "dock, dockage, docking facility", "dogsled, dog sled, dog sleigh", "dome", "doormat, welcome mat", "drilling platform, offshore rig", "drum, membranophone, tympan", "drumstick", "dumbbell", "dutch oven", "electric fan, blower", "electric guitar", "electric locomotive", "entertainment center", "envelope", "espresso maker", "face powder", "feather boa, boa", "file, file cabinet, filing cabinet", "fireboat", "fire engine, fire truck", "fire screen, fireguard", "flagpole, flagstaff", "flute, transverse flute", "folding chair", "football helmet", "forklift", "fountain", "fountain pen", "four-poster", "freight car", "french horn, horn", "frying pan, frypan, skillet", "fur coat", "garbage truck, dustcart", "gasmask, respirator, gas helmet", "gas pump, gasoline pump, petrol pump, island dispenser", "goblet", "go-kart", "golf ball", "golfcart, golf cart", "gondola", "gong, tam-tam", "gown", "grand piano, grand", "greenhouse, nursery, glasshouse", "grille, radiator grille", "grocery store, grocery, food market, market", "guillotine", "hair slide", "hair spray", "half track", "hammer", "hamper", "hand blower, blow dryer, blow drier, hair dryer, hair drier", "hand-held computer, hand-held microcomputer", "handkerchief, hankie, hanky, hankey", "hard disc, hard disk, fixed disk", "harmonica, mouth organ, harp, mouth harp", "harp", "harvester, reaper", "hatchet", "holster", "home theater, home theatre", "honeycomb", "hook, claw", "hoopskirt, crinoline", "horizontal bar, high bar", "horse cart, horse-cart", "hourglass", "ipod", "iron, smoothing iron", "jack-o'-lantern", "jean, blue jean, denim", "jeep, landrover", "jersey, t-shirt, tee shirt", "jigsaw puzzle", "jinrikisha, ricksha, rickshaw", "joystick", "kimono", "knee pad", "knot", "lab coat, laboratory coat", "ladle", "lampshade, lamp shade", "laptop, laptop computer", "lawn mower, mower", "lens cap, lens cover", "letter opener, paper knife, paperknife", "library", "lifeboat", "lighter, light, igniter, ignitor", "limousine, limo", "liner, ocean liner", "lipstick, lip rouge", "loafer", "lotion", "loudspeaker, speaker, speaker unit, loudspeaker system, speaker system", "loupe, jeweler's loupe", "lumbermill, sawmill", "magnetic compass", "mailbag, postbag", "mailbox, letter box", "maillot", "maillot, tank suit", "manhole cover", "maraca", "marimba, xylophone", "mask", "matchstick", "maypole", "maze, labyrinth", "measuring cup", "medicine chest, medicine cabinet", "megalith, megalithic structure", "microphone, mike", "microwave, microwave oven", "military uniform", "milk can", "minibus", 
"miniskirt, mini", "minivan", "missile", "mitten", "mixing bowl", "mobile home, manufactured home", "model t", "modem", "monastery", "monitor", "moped", "mortar", "mortarboard", "mosque", "mosquito net", "motor scooter, scooter", "mountain bike, all-terrain bike, off-roader", "mountain tent", "mouse, computer mouse", "mousetrap", "moving van", "muzzle", "nail", "neck brace", "necklace", "nipple", "notebook, notebook computer", "obelisk", "oboe, hautboy, hautbois", "ocarina, sweet potato", "odometer, hodometer, mileometer, milometer", "oil filter", "organ, pipe organ", "oscilloscope, scope, cathode-ray oscilloscope, cro", "overskirt", "oxcart", "oxygen mask", "packet", "paddle, boat paddle", "paddlewheel, paddle wheel", "padlock", "paintbrush", "pajama, pyjama, pj's, jammies", "palace", "panpipe, pandean pipe, syrinx", "paper towel", "parachute, chute", "parallel bars, bars", "park bench", "parking meter", "passenger car, coach, carriage", "patio, terrace", "pay-phone, pay-station", "pedestal, plinth, footstall", "pencil box, pencil case", "pencil sharpener", "perfume, essence", "petri dish", "photocopier", "pick, plectrum, plectron", "pickelhaube", "picket fence, paling", "pickup, pickup truck", "pier", "piggy bank, penny bank", "pill bottle", "pillow", "ping-pong ball", "pinwheel", "pirate, pirate ship", "pitcher, ewer", "plane, carpenter's plane, woodworking plane", "planetarium", "plastic bag", "plate rack", "plow, plough", "plunger, plumber's helper", "polaroid camera, polaroid land camera", "pole", "police van, police wagon, paddy wagon, patrol wagon, wagon, black maria", "poncho", "pool table, billiard table, snooker table", "pop bottle, soda bottle", "pot, flowerpot", "potter's wheel", "power drill", "prayer rug, prayer mat", "printer", "prison, prison house", "projectile, missile", "projector", "puck, hockey puck", "punching bag, punch bag, punching ball, punchball", "purse", "quill, quill pen", "quilt, comforter, comfort, puff", "racer, race car, racing car", "racket, racquet", "radiator", "radio, wireless", "radio telescope, radio reflector", "rain barrel", "recreational vehicle, rv, r.v.", "reel", "reflex camera", "refrigerator, icebox", "remote control, remote", "restaurant, eating house, eating place, eatery", "revolver, six-gun, six-shooter", "rifle", "rocking chair, rocker", "rotisserie", "rubber eraser, rubber, pencil eraser", "rugby ball", "rule, ruler", "running shoe", "safe", "safety pin", "saltshaker, salt shaker", "sandal", "sarong", "sax, saxophone", "scabbard", "scale, weighing machine", "school bus", "schooner", "scoreboard", "screen, crt screen", "screw", "screwdriver", "seat belt, seatbelt", "sewing machine", "shield, buckler", "shoe shop, shoe-shop, shoe store", "shoji", "shopping basket", "shopping cart", "shovel", "shower cap", "shower curtain", "ski", "ski mask", "sleeping bag", "slide rule, slipstick", "sliding door", "slot, one-armed bandit", "snorkel", "snowmobile", "snowplow, snowplough", "soap dispenser", "soccer ball", "sock", "solar dish, solar collector, solar furnace", "sombrero", "soup bowl", "space bar", "space heater", "space shuttle", "spatula", "speedboat", "spider web, spider's web", "spindle", "sports car, sport car", "spotlight, spot", "stage", "steam locomotive", "steel arch bridge", "steel drum", "stethoscope", "stole", "stone wall", "stopwatch, stop watch", "stove", "strainer", "streetcar, tram, tramcar, trolley, trolley car", "stretcher", "studio couch, day bed", "stupa, tope", "submarine, pigboat, sub, u-boat", "suit, suit of clothes", 
"sundial", "sunglass", "sunglasses, dark glasses, shades", "sunscreen, sunblock, sun blocker", "suspension bridge", "swab, swob, mop", "sweatshirt", "swimming trunks, bathing trunks", "swing", "switch, electric switch, electrical switch", "syringe", "table lamp", "tank, army tank, armored combat vehicle, armoured combat vehicle", "tape player", "teapot", "teddy, teddy bear", "television, television system", "tennis ball", "thatch, thatched roof", "theater curtain, theatre curtain", "thimble", "thresher, thrasher, threshing machine", "throne", "tile roof", "toaster", "tobacco shop, tobacconist shop, tobacconist", "toilet seat", "torch", "totem pole", "tow truck, tow car, wrecker", "toyshop", "tractor", "trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi", "tray", "trench coat", "tricycle, trike, velocipede", "trimaran", "tripod", "triumphal arch", "trolleybus, trolley coach, trackless trolley", "trombone", "tub, vat", "turnstile", "typewriter keyboard", "umbrella", "unicycle, monocycle", "upright, upright piano", "vacuum, vacuum cleaner", "vase", "vault", "velvet", "vending machine", "vestment", "viaduct", "violin, fiddle", "volleyball", "waffle iron", "wall clock", "wallet, billfold, notecase, pocketbook", "wardrobe, closet, press", "warplane, military plane", "washbasin, handbasin, washbowl, lavabo, wash-hand basin", "washer, automatic washer, washing machine", "water bottle", "water jug", "water tower", "whiskey jug", "whistle", "wig", "window screen", "window shade", "windsor tie", "wine bottle", "wing", "wok", "wooden spoon", "wool, woolen, woollen", "worm fence, snake fence, snake-rail fence, virginia fence", "wreck", "yawl", "yurt", "web site, website, internet site, site", "comic book", "crossword puzzle, crossword", "street sign", "traffic light, traffic signal, stoplight", "book jacket, dust cover, dust jacket, dust wrapper", "menu", "plate", "guacamole", "consomme", "hot pot, hotpot", "trifle", "ice cream, icecream", "ice lolly, lolly, lollipop, popsicle", "french loaf", "bagel, beigel", "pretzel", "cheeseburger", "hotdog, hot dog, red hot", "mashed potato", "head cabbage", "broccoli", "cauliflower", "zucchini, courgette", "spaghetti squash", "acorn squash", "butternut squash", "cucumber, cuke", "artichoke, globe artichoke", "bell pepper", "cardoon", "mushroom", "granny smith", "strawberry", "orange", "lemon", "fig", "pineapple, ananas", "banana", "jackfruit, jak, jack", "custard apple", "pomegranate", "hay", "carbonara", "chocolate sauce, chocolate syrup", "dough", "meat loaf, meatloaf", "pizza, pizza pie", "potpie", "burrito", "red wine", "espresso", "cup", "eggnog", "alp", "bubble", "cliff, drop, drop-off", "coral reef", "geyser", "lakeside, lakeshore", "promontory, headland, head, foreland", "sandbar, sand bar", "seashore, coast, seacoast, sea-coast", "valley, vale", "volcano", "ballplayer, baseball player", "groom, bridegroom", "scuba diver", "rapeseed", "daisy", "yellow lady's slipper, yellow lady-slipper, cypripedium calceolus, cypripedium parviflorum", "corn", "acorn", "hip, rose hip, rosehip", "buckeye, horse chestnut, conker", "coral fungus", "agaric", "gyromitra", "stinkhorn, carrion fungus", "earthstar", "hen-of-the-woods, hen of the woods, polyporus frondosus, grifola frondosa", "bolete", "ear, spike, capitulum", "toilet tissue, toilet paper, bathroom tissue" ]
volvoDon/petro-daemon
<!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # volvoDon/petro-daemon This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on a [dataset of petrologic cross sections](https://huggingface.co/datasets/volvoDon/petrology-sections). It achieves the following results on the evaluation set: - Train Loss: 0.8890 - Validation Loss: 1.1803 - Train Accuracy: 0.6 - Epoch: 19 ## Model description More information needed ## Intended uses & limitations Currently it is just a proof of concept, and it does a great job identifying olivine. It is not yet ready for a production environment, but the results are promising; with an improved dataset, I'm confident better results could be achieved. ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 3e-05, 'decay_steps': 300, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Train Accuracy | Epoch | |:----------:|:---------------:|:--------------:|:-----:| | 1.6519 | 1.7095 | 0.2 | 0 | | 1.5905 | 1.6747 | 0.2 | 1 | | 1.5690 | 1.6342 | 0.2 | 2 | | 1.5170 | 1.5931 | 0.2 | 3 | | 1.4764 | 1.5528 | 0.6 | 4 | | 1.3835 | 1.5079 | 0.6 | 5 | | 1.3420 | 1.4717 | 0.6 | 6 | | 1.3171 | 1.4232 | 0.6 | 7 | | 1.2897 | 1.3905 | 0.6 | 8 | | 1.2702 | 1.3794 | 0.6 | 9 | | 1.2023 | 1.3351 | 0.6 | 10 | | 1.1480 | 1.3384 | 0.6 | 11 | | 1.1434 | 1.3419 | 0.6 | 12 | | 1.0499 | 1.3226 | 0.6 | 13 | | 1.0672 | 1.2647 | 0.6 | 14 | | 1.0526 | 1.1533 | 0.6 | 15 | | 1.0184 | 1.1546 | 0.6 | 16 | | 0.9505 | 1.2491 | 0.6 | 17 | | 0.9578 | 1.2809 | 0.4 | 18 | | 0.8890 | 1.1803 | 0.6 | 19 | ### Framework versions - Transformers 4.32.1 - TensorFlow 2.12.0 - Datasets 2.14.4 - Tokenizers 0.13.3
[ "garnet", "kayanite", "olivine", "sillimanite", "staurolite", "titanite", "zircon" ]
elami/vit-base-patch16-224-finetuned-flower
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-patch16-224-finetuned-flower This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the imagefolder dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results ### Framework versions - Transformers 4.24.0 - Pytorch 2.0.1+cu118 - Datasets 2.7.1 - Tokenizers 0.13.3
[ "daisy", "dandelion", "roses", "sunflowers", "tulips" ]
franciscoafy/vit-base-patch16-224-franciscoflores-classification
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# vit-base-patch16-224-franciscoflores-classification

This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0071
- Accuracy: 0.9988

## Model description

This model applies transfer learning from a pre-trained image classification model to determine which images show a dog and which show food.

## Intended uses & limitations

More information needed

## Training and evaluation data

This model was trained using the "sasha/dog-food" dataset.

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0002
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 0.0618        | 1.9   | 500  | 0.0146          |
| 0.0062        | 3.8   | 1000 | 0.0071          |

### Framework versions

- Transformers 4.33.1
- Pytorch 2.0.1+cu118
- Datasets 2.14.5
- Tokenizers 0.13.3
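As a minimal usage sketch (not part of the original card), the checkpoint can be queried through the standard image-classification pipeline; `photo.jpg` is a placeholder path:

```python
from transformers import pipeline

# Dog-vs-food classifier; returns labels with confidence scores.
classifier = pipeline(
    "image-classification",
    model="franciscoafy/vit-base-patch16-224-franciscoflores-classification",
)
print(classifier("photo.jpg"))  # e.g. [{'label': 'dog', 'score': 0.99}, ...]
```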
[ "dog", "food" ]
aditira/image_classification
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # image_classification This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the food101 dataset. It achieves the following results on the evaluation set: - Loss: 1.6460 - Accuracy: 0.884 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 2.7437 | 0.99 | 62 | 2.5588 | 0.831 | | 1.819 | 2.0 | 125 | 1.8089 | 0.863 | | 1.6032 | 2.98 | 186 | 1.6548 | 0.886 | ### Framework versions - Transformers 4.33.0 - Pytorch 2.0.1+cu118 - Datasets 2.14.4 - Tokenizers 0.13.3
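For readers who want to see how the hyperparameter list above maps onto the Transformers API, here is an illustrative `TrainingArguments` reconstruction; this is a sketch inferred from the card, not the author's actual training script, and `output_dir` is arbitrary:

```python
from transformers import TrainingArguments

# Illustrative reconstruction of the hyperparameters listed above.
training_args = TrainingArguments(
    output_dir="image_classification",  # arbitrary output directory
    learning_rate=5e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    gradient_accumulation_steps=4,   # 16 x 4 = 64 total train batch size
    num_train_epochs=3,
    lr_scheduler_type="linear",
    warmup_ratio=0.1,
    seed=42,
)
```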
[ "apple_pie", "baby_back_ribs", "bruschetta", "waffles", "caesar_salad", "cannoli", "caprese_salad", "carrot_cake", "ceviche", "cheesecake", "cheese_plate", "chicken_curry", "chicken_quesadilla", "baklava", "chicken_wings", "chocolate_cake", "chocolate_mousse", "churros", "clam_chowder", "club_sandwich", "crab_cakes", "creme_brulee", "croque_madame", "cup_cakes", "beef_carpaccio", "deviled_eggs", "donuts", "dumplings", "edamame", "eggs_benedict", "escargots", "falafel", "filet_mignon", "fish_and_chips", "foie_gras", "beef_tartare", "french_fries", "french_onion_soup", "french_toast", "fried_calamari", "fried_rice", "frozen_yogurt", "garlic_bread", "gnocchi", "greek_salad", "grilled_cheese_sandwich", "beet_salad", "grilled_salmon", "guacamole", "gyoza", "hamburger", "hot_and_sour_soup", "hot_dog", "huevos_rancheros", "hummus", "ice_cream", "lasagna", "beignets", "lobster_bisque", "lobster_roll_sandwich", "macaroni_and_cheese", "macarons", "miso_soup", "mussels", "nachos", "omelette", "onion_rings", "oysters", "bibimbap", "pad_thai", "paella", "pancakes", "panna_cotta", "peking_duck", "pho", "pizza", "pork_chop", "poutine", "prime_rib", "bread_pudding", "pulled_pork_sandwich", "ramen", "ravioli", "red_velvet_cake", "risotto", "samosa", "sashimi", "scallops", "seaweed_salad", "shrimp_and_grits", "breakfast_burrito", "spaghetti_bolognese", "spaghetti_carbonara", "spring_rolls", "steak", "strawberry_shortcake", "sushi", "tacos", "takoyaki", "tiramisu", "tuna_tartare" ]
henkef1/vit-base-patch16-224-finetuned-flower
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-patch16-224-finetuned-flower This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the imagefolder dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results ### Framework versions - Transformers 4.24.0 - Pytorch 2.0.1+cu118 - Datasets 2.7.1 - Tokenizers 0.13.3
[ "daisy", "dandelion", "roses", "sunflowers", "tulips" ]
volvoDon/flwr-ViT
<!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # flwr-ViT This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 1.5345 - Validation Loss: 1.5286 - Train Accuracy: 0.6574 - Epoch: 3 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 3e-05, 'decay_steps': 15, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Train Accuracy | Epoch | |:----------:|:---------------:|:--------------:|:-----:| | 1.5388 | 1.5286 | 0.6574 | 0 | | 1.5339 | 1.5286 | 0.6574 | 1 | | 1.5344 | 1.5286 | 0.6574 | 2 | | 1.5345 | 1.5286 | 0.6574 | 3 | ### Framework versions - Transformers 4.33.0 - TensorFlow 2.12.0 - Datasets 2.14.4 - Tokenizers 0.13.3
[ "daisy", "dandelion", "rose", "sunflower", "tulip" ]
RaymundoSGlz/vit_model_beans
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit_model_beans This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.0310 - Accuracy: 0.9925 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.30.2 - Pytorch 2.0.1+cu118 - Datasets 2.14.4 - Tokenizers 0.13.3
[ "angular_leaf_spot", "bean_rust", "healthy" ]
RaymundoSGlz/vit_model
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit_model This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.0358 - Accuracy: 0.9925 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1474 | 3.85 | 500 | 0.0358 | 0.9925 | ### Framework versions - Transformers 4.30.2 - Pytorch 2.0.1+cu118 - Datasets 2.14.4 - Tokenizers 0.13.3
[ "angular_leaf_spot", "bean_rust", "healthy" ]
DataBindu/swinv2-large-patch4-window12to24-192to384-22kto1k-ft-microbes
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swinv2-large-patch4-window12to24-192to384-22kto1k-ft-microbes This model is a fine-tuned version of [microsoft/swinv2-large-patch4-window12to24-192to384-22kto1k-ft](https://huggingface.co/microsoft/swinv2-large-patch4-window12to24-192to384-22kto1k-ft) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 1.0311 - Accuracy: 0.7130 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 3.8445 | 0.98 | 15 | 2.8535 | 0.3194 | | 2.1358 | 1.97 | 30 | 1.9654 | 0.4491 | | 1.5947 | 2.95 | 45 | 1.4172 | 0.6204 | | 1.045 | 4.0 | 61 | 1.1698 | 0.6806 | | 0.985 | 4.98 | 76 | 1.1927 | 0.6852 | | 0.775 | 5.97 | 91 | 1.1012 | 0.6898 | | 0.7207 | 6.95 | 106 | 1.0311 | 0.7130 | | 0.6611 | 7.87 | 120 | 1.0311 | 0.6991 | ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cpu - Datasets 2.14.4 - Tokenizers 0.13.3
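A hedged inference sketch for this checkpoint follows (not part of the original card); `micrograph.png` is a placeholder, and the standard auto classes are assumed to resolve the SwinV2 architecture:

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

repo = "DataBindu/swinv2-large-patch4-window12to24-192to384-22kto1k-ft-microbes"
processor = AutoImageProcessor.from_pretrained(repo)
model = AutoModelForImageClassification.from_pretrained(repo)

image = Image.open("micrograph.png").convert("RGB")  # placeholder image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    probs = model(**inputs).logits.softmax(-1)[0]
idx = int(probs.argmax())
print(model.config.id2label[idx], float(probs[idx]))
```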
[ "actinomycetes_mycolata", "anaerobic type b", "anaerobic suflur bacteria", "beggiatoa", "flexibacter", "fungi", "gao", "haliscomenobacter", "hyphomicrobium", "microscrilla", "microthrix", "nitrosomonas", "nostocoida limicola iii", "nostocoida limicola i_ii", "pao", "sphaerotilus", "spirilla", "spirochaetes", "tetrads", "thiothrix", "type 0041_0675", "type 0092", "type 021n", "type 0411", "type 0581_chloroflexi", "type 0914_0803", "type 0961", "type 1701", "type 1863", "zoogloea", "bristleworm", "crustacean", "dead filament", "elevated polysaccharide", "ferric iron", "fibrous material", "filament impact on floc structure high", "filament impact on floc structure low", "filament impact on floc structure moderate", "flagellate", "floc open_diffuse", "floc strong", "floc weak", "free swimming ciliate", "gastrotrich", "grease", "inert", "iron sulfide", "irregular growth formations", "naked amoebae", "nematode", "normal polysaccharide", "oil", "rotifer", "stalked ciliate", "testate amoebae", "type 1851", "water bear", "yeast" ]
mcQuantum/swin-tiny-patch4-window7-224-finetuned-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-eurosat This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.0792 - Accuracy: 0.9726 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.226 | 1.0 | 190 | 0.1455 | 0.9519 | | 0.199 | 2.0 | 380 | 0.0931 | 0.9681 | | 0.1476 | 3.0 | 570 | 0.0792 | 0.9726 | ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "annualcrop", "forest", "herbaceousvegetation", "highway", "industrial", "pasture", "permanentcrop", "residential", "river", "sealake" ]
aditira/emotion_classification
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # emotion_classification This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 1.3327 - Accuracy: 0.4875 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 15 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.8526 | 1.0 | 10 | 1.8929 | 0.3563 | | 1.7464 | 2.0 | 20 | 1.7105 | 0.3625 | | 1.6096 | 3.0 | 30 | 1.5898 | 0.4625 | | 1.4988 | 4.0 | 40 | 1.5056 | 0.5188 | | 1.4218 | 5.0 | 50 | 1.4349 | 0.4938 | | 1.3439 | 6.0 | 60 | 1.4127 | 0.525 | | 1.2799 | 7.0 | 70 | 1.3780 | 0.55 | | 1.2037 | 8.0 | 80 | 1.3463 | 0.5 | | 1.1637 | 9.0 | 90 | 1.3236 | 0.55 | | 1.1361 | 10.0 | 100 | 1.2950 | 0.5437 | | 1.0836 | 11.0 | 110 | 1.3059 | 0.525 | | 1.046 | 12.0 | 120 | 1.2707 | 0.525 | | 1.0277 | 13.0 | 130 | 1.2686 | 0.5563 | | 1.0236 | 14.0 | 140 | 1.2790 | 0.5062 | | 0.9926 | 15.0 | 150 | 1.2763 | 0.5687 | ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
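Given the moderate accuracy reported above, inspecting the top few predicted emotions rather than only the single best label may be useful; a small sketch, not from the original card, with `face.jpg` as a placeholder:

```python
from transformers import pipeline

clf = pipeline("image-classification", model="aditira/emotion_classification")
# Show the three highest-scoring emotion labels for a face image.
for pred in clf("face.jpg", top_k=3):
    print(f"{pred['label']}: {pred['score']:.3f}")
```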
[ "anger", "contempt", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
erdonghu/swin-tiny-patch4-window7-224-finetuned-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-eurosat This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.1218 - Accuracy: 0.9567 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 512 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.4385 | 0.99 | 47 | 0.2195 | 0.9322 | | 0.2496 | 2.0 | 95 | 0.1507 | 0.9530 | | 0.2013 | 2.97 | 141 | 0.1218 | 0.9567 | ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cu117 - Datasets 2.13.1 - Tokenizers 0.13.3
[ "annualcrop", "forest", "herbaceousvegetation", "highway", "industrial", "pasture", "permanentcrop", "residential", "river", "sealake" ]
DHEIVER/Modelo-de-Classificacao-de-Glaucoma
Glaucoma Classification Model for Retinal Fundus Images (Swin Transformer)

**Quick Summary**

This model uses a Swin Transformer architecture and was fine-tuned on retinal fundus images from the [REFUGE challenge dataset](https://refuge.grand-challenge.org/). The model specializes in automated glaucoma detection from retinal fundus photographs. By extracting hierarchical visual features from fundus images, the model effectively classifies each image as glaucoma or non-glaucoma. Extensive experiments show that this model achieves state-of-the-art performance on the REFUGE benchmark for fundus-based glaucoma classification. For the best predictions, standardized fundus photographs captured under typical fundus imaging protocols are recommended.

**Model Details**

**Uses**

This pre-trained model provides glaucoma classification based solely on the analysis of retinal fundus images. You can use the model directly, without modification, to classify fundus images as glaucoma or non-glaucoma. It specializes in extracting discriminative features from fundus images to identify manifestations of glaucoma. However, for the best performance, it is strongly recommended to fine-tune the model on a representative fundus image dataset before using it in real-world applications.

**Bias, Risks, and Limitations**

This model specializes in the analysis of retinal fundus images and was trained exclusively on fundus image datasets to classify images as glaucoma or non-glaucoma. To obtain accurate predictions, it is therefore crucial to provide only fundus images when using this model; other types of images may produce meaningless results. In short, this model requires fundus images as input for glaucoma classification, and this input specification should be followed strictly.

**How to Get Started with the Model**

To get started with the model, you can use the code below:

```python
import cv2
import torch
from transformers import AutoImageProcessor, Swinv2ForImageClassification

imagem = cv2.imread('./example.jpg')
imagem = cv2.cvtColor(imagem, cv2.COLOR_BGR2RGB)

processador = AutoImageProcessor.from_pretrained("pamixsun/swinv2_tiny_for_glaucoma_classification")
modelo = Swinv2ForImageClassification.from_pretrained("pamixsun/swinv2_tiny_for_glaucoma_classification")

entradas = processador(imagem, return_tensors="pt")

with torch.no_grad():
    logits = modelo(**entradas).logits

# The model predicts glaucoma or non-glaucoma.
rótulo_previsto = logits.argmax(-1).item()
print(modelo.config.id2label[rótulo_previsto])
```

**Marketing**

If you are looking for a reliable and effective solution for glaucoma detection in fundus images, our Swin Transformer-based model is the right choice. Rigorously trained on the REFUGE dataset, it delivers high-performance results and specializes in automated glaucoma identification. Use it for fast, accurate assessment of retinal fundus images, and simplify glaucoma detection with our advanced image classification model.
[ "non-glaucoma", "glaucoma" ]
dima806/footwear_image_detection
See https://www.kaggle.com/code/dima806/shoe-vs-sandal-vs-boot-image-detection for more details.
[ "sandal", "boot", "shoe" ]
OttoYu/Tree-Inspection
# Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 87833143598 - CO2 Emissions (in grams): 2.1482 ## Validation Metrics - Loss: 1.251 - Accuracy: 0.652 - Macro F1: 0.594 - Micro F1: 0.652 - Weighted F1: 0.620 - Macro Precision: 0.629 - Micro Precision: 0.652 - Weighted Precision: 0.642 - Macro Recall: 0.617 - Micro Recall: 0.652 - Weighted Recall: 0.652
[ "asymmetric tree canopy 樹冠不對稱", "bleedingsap flow 滲液", "crown reducedexcessively thinned topped 樹冠大幅減少削頂截頭", "cut pruning roots 根部經切割或截根", "dead branches hangers 枯死 懸吊斷枝", "dead surface roots 根部腐壞", "dieback twigs 枯枝", "epicormics 水橫枝", "exposed dead wood 枯幹外露", "fungal fruiting bodies 呈現真菌子實體", "girdling roots 纏繞根", "heavy crown load 樹冠負荷太重", "bulge 腫脹", "heavy lateral limb 重側枝", "included bark 內夾樹皮", "lion’s tailing 獅尾", "multiple stems 多枝幹", "parasitic or epiphytic plants 寄生附生植物", "root flare collar not visible 根脊不現", "root-plate movement 根基移位", "signs of pests and disease 呈現病蟲害徵狀", "termites borers injury 白蟻或蛀心蟲蛀蝕", "wounds mechanical injury 明顯傷痕機械破損", "cavities 樹洞", "co-dominant branches 等勢枝", "co-dominant trunks 等勢幹", "cracks or fissures on branches 樹幹有裂縫或裂開", "cracks splits 裂縫 裂開", "crooks abrupt bends 不常規彎曲", "cross branches 疊枝" ]
DawidTobolski/swin-tiny-patch4-window7-224-fine-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-fine-eurosat This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.0501 - Accuracy: 0.9841 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.2682 | 1.0 | 190 | 0.1162 | 0.9626 | | 0.1964 | 2.0 | 380 | 0.0754 | 0.9763 | | 0.1559 | 3.0 | 570 | 0.0650 | 0.9785 | | 0.1403 | 4.0 | 760 | 0.0771 | 0.9726 | | 0.1104 | 5.0 | 950 | 0.0501 | 0.9841 | ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "annualcrop", "forest", "herbaceousvegetation", "highway", "industrial", "pasture", "permanentcrop", "residential", "river", "sealake" ]
jjluo/my_awesome_mingliangqiangu_model
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my_awesome_mingliangqiangu_model This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.1140 - Accuracy: 0.9981 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.7575 | 0.99 | 67 | 1.3989 | 0.9287 | | 0.4806 | 2.0 | 135 | 0.4502 | 0.9935 | | 0.2902 | 2.99 | 202 | 0.2922 | 0.9944 | | 0.2073 | 4.0 | 270 | 0.2118 | 0.9981 | | 0.1975 | 4.99 | 337 | 0.1831 | 0.9963 | | 0.1514 | 6.0 | 405 | 0.1576 | 0.9935 | | 0.1282 | 6.99 | 472 | 0.1290 | 1.0 | | 0.1224 | 8.0 | 540 | 0.1317 | 0.9963 | | 0.1147 | 8.99 | 607 | 0.1127 | 1.0 | | 0.1129 | 9.93 | 670 | 0.1140 | 0.9981 | ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "zhujiulunyingxiong", "wangqichuxian", "jiangjinghuaxiangzhuan", "chibizhizhanchangjing", "hanmumuzhuan", "baidituogu", "yaoqianshu", "zhibaiwuzhuqian", "tongwuzhuqiandiezhujian", "shuochangyong", "sangumaoluyulongzhongduicechangjing", "longzhonggengdu", "liubeirushu", "liubeizhishumingwendizhuan", "liubeichengdi" ]
dima806/bird_species_image_detection
See https://www.kaggle.com/code/dima806/bird-species-detection for details.
[ "kiwi", "burchells courser", "plush crested jay", "bar-tailed godwit", "anianiau", "gouldian finch", "wattled lapwing", "emu", "cabots tragopan", "horned guan", "pygmy kingfisher", "yellow headed blackbird", "smiths longspur", "puna teal", "inca tern", "crested oropendola", "marabou stork", "scarlet crowned fruit dove", "grey plover", "violet turaco", "indian pitta", "ivory gull", "green winged dove", "zebra dove", "azaras spinetail", "king eider", "satyr tragopan", "cape rock thrush", "banded pita", "caatinga cacholote", "red winged blackbird", "great potoo", "altamira yellowthroat", "rufous kingfisher", "northern beardless tyrannulet", "varied thrush", "black baza", "common firecrest", "elliots pheasant", "java sparrow", "razorbill", "emperor penguin", "green jay", "black throated bushtit", "indigo bunting", "asian green bee eater", "military macaw", "long-eared owl", "japanese robin", "great xenops", "scarlet macaw", "wild turkey", "lucifer hummingbird", "grey cuckooshrike", "quetzal", "blue grouse", "mourning dove", "spotted whistling duck", "ashy thrushbird", "gray partridge", "palila", "great gray owl", "northern shoveler", "european goldfinch", "green magpie", "groved billed ani", "ashy storm petrel", "frigate", "black-necked grebe", "black skimmer", "crimson sunbird", "azure jay", "turkey vulture", "mckays bunting", "ring-necked pheasant", "malachite kingfisher", "verdin", "european turtle dove", "crested serpent eagle", "african pied hornbill", "chipping sparrow", "andean siskin", "northern fulmar", "eastern wip poor will", "pomarine jaeger", "hooded merganser", "ruddy shelduck", "whimbrel", "clarks nutcracker", "common iora", "flame bowerbird", "short billed dowitcher", "superb starling", "white necked raven", "vermilion flycather", "hamerkop", "northern mockingbird", "enggano myna", "asian openbill stork", "yellow cacique", "jacobin pigeon", "spoon biled sandpiper", "azure tanager", "american wigeon", "hyacinth macaw", "rose breasted grosbeak", "emerald tanager", "red headed woodpecker", "barn owl", "kakapo", "double brested cormarant", "forest wagtail", "golden eagle", "bornean bristlehead", "lilac roller", "crested kingfisher", "limpkin", "abbotts babbler", "banded stilt", "eastern bluebonnet", "asian crested ibis", "cactus wren", "avadavat", "crab plover", "greater prairie chicken", "double barred finch", "guinea turaco", "common starling", "eurasian magpie", "malagasy white eye", "fasciated wren", "d-arnauds barbet", "barn swallow", "tree swallow", "eurasian golden oriole", "shoebill", "black breasted puffbird", "sunbittern", "ibisbill", "red headed duck", "touchan", "go away bird", "california gull", "american goldfinch", "house finch", "brewers blackbird", "black francolin", "rufous trepe", "black faced spoonbill", "blue malkoha", "common house martin", "okinawa rail", "rainbow lorikeet", "ocellated turkey", "glossy ibis", "grey headed fish eagle", "california quail", "cerulean warbler", "fairy tern", "iwi", "eared pita", "pink robin", "masked booby", "wall creaper", "maleo", "azure tit", "amethyst woodstar", "american avocet", "myna", "anhinga", "veery", "eurasian bullfinch", "noisy friarbird", "squacco heron", "orange breasted trogon", "stripped swallow", "red bellied pitta", "darjeeling woodpecker", "says phoebe", "red crossbill", "dalmatian pelican", "golden bower bird", "red tailed thrush", "painted bunting", "barrows goldeneye", "evening grosbeak", "african firefinch", "house sparrow", "bearded barbet", "andean goose", "chattering lory", "flame tanager", 
"grandala", "vulturine guineafowl", "cuban trogon", "baltimore oriole", "magpie goose", "wrentit", "tit mouse", "laughing gull", "coppery tailed coucal", "canary", "scarlet ibis", "great argus", "crested fireback", "cassowary", "surf scoter", "red faced cormorant", "purple martin", "loggerhead shrike", "gang gang cockatoo", "brown crepper", "auckland shaq", "canvasback", "bobolink", "kookaburra", "bald eagle", "american redstart", "rosy faced lovebird", "black vulture", "northern parula", "blue coau", "abbotts booby", "himalayan bluetail", "fairy bluebird", "red browed finch", "chukar partridge", "crane hawk", "red billed tropicbird", "white browed crake", "coppersmith barbet", "black throated warbler", "kagu", "northern flicker", "indian roller", "blue gray gnatcatcher", "alpine chough", "eastern bluebird", "masked bobwhite", "australasian figbird", "collared aracari", "hoatzin", "bearded bellbird", "blue grosbeak", "ivory billed aracari", "abyssinian ground hornbill", "bay-breasted warbler", "gilded flicker", "regent bowerbird", "eastern meadowlark", "ruby throated hummingbird", "mangrove cuckoo", "ecuadorian hillstar", "yellow breasted chat", "parus major", "eastern rosella", "red knot", "striped owl", "striated caracara", "parakett auklet", "gray catbird", "fire tailled myzornis", "roadrunner", "splendid wren", "cock of the rock", "bananaquit", "cream colored woodpecker", "andean lapwing", "mandrin duck", "steamer duck", "bornean pheasant", "great kiskadee", "hepatic tanager", "egyptian goose", "fiordland penguin", "jocotoco antpitta", "cape glossy starling", "bali starling", "banded broadbill", "blue throated toucanet", "black cockato", "snow partridge", "stork billed kingfisher", "carmine bee-eater", "sri lanka blue magpie", "apapane", "ovenbird", "bush turkey", "oilbird", "black swan", "takahe", "dusky lory", "hawfinch", "masked lapwing", "cape longclaw", "himalayan monal", "demoiselle crane", "crested shriketit", "african emerald cuckoo", "cockatoo", "blue dacnis", "rock dove", "sand martin", "rough leg buzzard", "townsends warbler", "golden cheeked warbler", "gila woodpecker", "stripped manakin", "umbrella bird", "dark eyed junco", "bird of paradise", "common poorwill", "purple finch", "american pipit", "gurneys pitta", "lark bunting", "red wiskered bulbul", "snowy plover", "african crowned crane", "red naped trogon", "dunlin", "black and yellow broadbill", "northern red bishop", "taiwan magpie", "caspian tern", "white tailed tropic", "peregrine falcon", "trumpter swan", "brandt cormarant", "eastern towee", "clarks grebe", "ostrich", "red bearded bee eater", "bearded reedling", "snowy egret", "purple swamphen", "asian dollard bird", "violet backed starling", "willow ptarmigan", "cuban tody", "downy woodpecker", "paradise tanager", "band tailed guan", "nicobar pigeon", "knob billed duck", "king vulture", "daurian redstart", "northern gannet", "chucao tapaculo", "crow", "blood pheasant", "harlequin quail", "annas hummingbird", "harpy eagle", "black throated huet", "golden pipit", "campo flicker", "tasmanian hen", "black vented shearwater", "violet green swallow", "blackburniam warbler", "tricolored blackbird", "apostlebird", "horned lark", "crested nuthatch", "common grackle", "cedar waxwing", "chinese pond heron", "scarlet faced liocichla", "rudy kingfisher", "chara de collar", "roseate spoonbill", "brown noody", "ornate hawk eagle", "white eared hummingbird", "california condor", "white crested hornbill", "araripe manakin", "looney birds", "wood thrush", "greator sage grouse", 
"wood duck", "barred puffbird", "gambels quail", "grey headed chachalaca", "azure breasted pitta", "alexandrine parakeet", "cape may warbler", "african pygmy goose", "golden pheasant", "wilsons bird of paradise", "phainopepla", "turquoise motmot", "jandaya parakeet", "eastern yellow robin", "black headed caique", "gyrfalcon", "victoria crowned pigeon", "great tinamou", "red shouldered hawk", "black-throated sparrow", "yellow bellied flowerpecker", "palm nut vulture", "mikado pheasant", "american robin", "puffin", "american bittern", "bufflehead", "jabiru", "philippine eagle", "little auk", "ruby crowned kinglet", "harlequin duck", "northern cardinal", "pyrrhuloxia", "black-capped chickadee", "red legged honeycreeper", "inland dotterel", "indigo flycatcher", "scarlet tanager", "crested caracara", "eastern golden weaver", "oyster catcher", "gray kingbird", "killdear", "fairy penguin", "woodland kingfisher", "violet cuckoo", "baikal teal", "bulwers pheasant", "sandhill crane", "peacock", "frill back pigeon", "red faced warbler", "curl crested aracuri", "crested auklet", "spangled cotinga", "alberts towhee", "capuchinbird", "purple gallinule", "merlin", "parakett auklet", "malabar hornbill", "tailorbird", "fan tailed widow", "chestnut winged cuckoo", "american coot", "rose breasted cockatoo", "crested coua", "chestnet bellied euphonia", "wattled curassow", "american flamingo", "hawaiian goose", "belted kingfisher", "cinnamon flycatcher", "dusky robin", "imperial shaq", "brown thrasher", "iberian magpie", "elegant trogon", "horned sungem", "gold wing warbler", "greater pewee", "lazuli bunting", "green broadbill", "red fody", "samatran thrush", "fiery minivet", "black necked stilt", "cinnamon attila", "snow goose", "northern jacana", "antbird", "snowy sheathbill", "tawny frogmouth", "visayan hornbill", "northern goshawk", "white throated bee eater", "venezuelian troupial", "bornean leafbird", "red tailed hawk", "hoopoes", "oriental bay owl", "great jacamar", "rufuos motmot", "orange brested bunting", "swinhoes pheasant", "albatross", "osprey", "indian vulture", "double eyed fig parrot", "brown headed cowbird", "spotted catbird", "mallard duck", "guineafowl", "blue heron", "cinnamon teal", "blonde crested woodpecker", "collared crescentchest", "patagonian sierra finch", "antillean euphonia", "bald ibis", "golden chlorophonia", "indian bustard", "sora", "capped heron", "snowy owl", "austral canastero", "black tail crake", "white breasted waterhen", "royal flycatcher", "jack snipe", "white cheeked turaco", "crimson chat", "crested wood partridge", "american dipper", "common loon", "tropical kingbird", "helmet vanga", "african oyster catcher", "golden parakeet", "chinese bamboo partridge", "teal duck", "blue throated piping guan", "lesser adjutant", "american kestrel" ]
dima806/dogs_70_breeds_image_detection
Predicts the dog breed from an image. Achieves about 92% accuracy on unseen (test) data. See [my Kaggle notebook](https://www.kaggle.com/code/dima806/70-dog-breed-image-detection-vit) and [my Medium article](https://medium.com/gitconnected/paws-and-pixels-creating-a-dog-breeds-classifier-with-googles-vision-transformer-431137422830) for more details.

![image/png](https://cdn-uploads.huggingface.co/production/uploads/6449300e3adf50d864095b90/KdUJH1qZauUr5kargckRa.png)

```
Classification report:

                    precision    recall  f1-score   support

            Afghan     0.9753    0.9080    0.9405        87
  African Wild Dog     1.0000    1.0000    1.0000        88
          Airedale     1.0000    0.9885    0.9942        87
 American Hairless     0.9841    0.7126    0.8267        87
  American Spaniel     0.9054    0.7701    0.8323        87
           Basenji     0.9659    0.9770    0.9714        87
            Basset     0.9551    0.9770    0.9659        87
            Beagle     0.9659    0.9659    0.9659        88
    Bearded Collie     0.8614    1.0000    0.9255        87
          Bermaise     0.9457    1.0000    0.9721        87
      Bichon Frise     0.9551    0.9770    0.9659        87
          Blenheim     0.9062    1.0000    0.9508        87
        Bloodhound     0.9659    0.9770    0.9714        87
          Bluetick     1.0000    0.9540    0.9765        87
     Border Collie     0.8830    0.9540    0.9171        87
            Borzoi     1.0000    0.9432    0.9708        88
    Boston Terrier     0.5513    0.9773    0.7049        88
             Boxer     1.0000    0.9655    0.9825        87
      Bull Mastiff     0.9655    0.9655    0.9655        87
      Bull Terrier     1.0000    0.9885    0.9942        87
           Bulldog     0.9583    0.2614    0.4107        88
             Cairn     0.8737    0.9540    0.9121        87
         Chihuahua     0.9610    0.8409    0.8970        88
   Chinese Crested     0.9750    0.8966    0.9341        87
              Chow     1.0000    1.0000    1.0000        88
           Clumber     0.9884    0.9770    0.9827        87
          Cockapoo     0.7238    0.8736    0.7917        87
            Cocker     0.9868    0.8621    0.9202        87
            Collie     0.9630    0.8966    0.9286        87
             Corgi     0.9881    0.9540    0.9708        87
            Coyote     0.9560    1.0000    0.9775        87
         Dalmation     0.9560    1.0000    0.9775        87
             Dhole     0.9765    0.9540    0.9651        87
             Dingo     0.8966    0.8966    0.8966        87
          Doberman     0.9333    0.9655    0.9492        87
         Elk Hound     0.9775    1.0000    0.9886        87
    French Bulldog     0.8810    0.8506    0.8655        87
    German Sheperd     0.6803    0.9432    0.7905        88
  Golden Retriever     0.9767    0.9655    0.9711        87
        Great Dane     0.8929    0.8621    0.8772        87
    Great Perenees     0.9667    1.0000    0.9831        87
         Greyhound     0.9750    0.8966    0.9341        87
       Groenendael     0.9062    1.0000    0.9508        87
     Irish Spaniel     0.8173    0.9770    0.8901        87
   Irish Wolfhound     0.9239    0.9770    0.9497        87
  Japanese Spaniel     0.9101    0.9310    0.9205        87
          Komondor     0.9885    0.9885    0.9885        87
       Labradoodle     0.8750    0.6437    0.7417        87
          Labrador     1.0000    0.9091    0.9524        88
             Lhasa     0.9231    0.5517    0.6906        87
          Malinois     0.9756    0.4598    0.6250        87
           Maltese     0.8958    0.9773    0.9348        88
      Mex Hairless     0.7870    0.9770    0.8718        87
      Newfoundland     0.9438    0.9655    0.9545        87
          Pekinese     0.9333    0.9545    0.9438        88
          Pit Bull     0.8969    1.0000    0.9457        87
        Pomeranian     0.9121    0.9540    0.9326        87
            Poodle     0.9759    0.9205    0.9474        88
               Pug     0.9529    0.9310    0.9419        87
         Rhodesian     0.9130    0.9655    0.9385        87
        Rottweiler     0.9556    0.9885    0.9718        87
     Saint Bernard     0.9773    0.9885    0.9829        87
         Schnauzer     0.8684    0.7586    0.8098        87
    Scotch Terrier     0.9506    0.8851    0.9167        87
          Shar_Pei     0.9886    1.0000    0.9943        87
         Shiba Inu     0.9286    0.8966    0.9123        87
          Shih-Tzu     0.6957    0.9195    0.7921        87
    Siberian Husky     0.9667    1.0000    0.9831        87
            Vizsla     0.9355    0.9886    0.9613        88
            Yorkie     0.9457    0.9886    0.9667        88

          accuracy                         0.9192      6104
         macro avg     0.9288    0.9193    0.9161      6104
      weighted avg     0.9288    0.9192    0.9161      6104
```
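As a hedged usage sketch (not part of the original card), the pipeline also accepts a list of images for batch prediction; the file paths below are placeholders:

```python
from transformers import pipeline

clf = pipeline("image-classification", model="dima806/dogs_70_breeds_image_detection")
# Batch-classify several photos at once; top_k limits labels per image.
for preds in clf(["dog1.jpg", "dog2.jpg"], top_k=3):
    print([(p["label"], round(p["score"], 3)) for p in preds])
```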
[ "afghan", "african wild dog", "airedale", "american hairless", "american spaniel", "basenji", "basset", "beagle", "bearded collie", "bermaise", "bichon frise", "blenheim", "bloodhound", "bluetick", "border collie", "borzoi", "boston terrier", "boxer", "bull mastiff", "bull terrier", "bulldog", "cairn", "chihuahua", "chinese crested", "chow", "clumber", "cockapoo", "cocker", "collie", "corgi", "coyote", "dalmation", "dhole", "dingo", "doberman", "elk hound", "french bulldog", "german sheperd", "golden retriever", "great dane", "great perenees", "greyhound", "groenendael", "irish spaniel", "irish wolfhound", "japanese spaniel", "komondor", "labradoodle", "labrador", "lhasa", "malinois", "maltese", "mex hairless", "newfoundland", "pekinese", "pit bull", "pomeranian", "poodle", "pug", "rhodesian", "rottweiler", "saint bernard", "schnauzer", "scotch terrier", "shar_pei", "shiba inu", "shih-tzu", "siberian husky", "vizsla", "yorkie" ]
Abhiram4/swin-tiny-patch4-window7-224-finetuned-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-eurosat This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the image_folder dataset. It achieves the following results on the evaluation set: - Loss: 0.1280 - Accuracy: 0.9543 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 512 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.2667 | 1.0 | 297 | 0.1817 | 0.9377 | | 0.2182 | 2.0 | 594 | 0.1385 | 0.9514 | | 0.1934 | 3.0 | 891 | 0.1280 | 0.9543 | ### Framework versions - Transformers 4.33.0 - Pytorch 2.0.0 - Datasets 2.1.0 - Tokenizers 0.13.3
[ "cnv", "dme", "drusen", "normal" ]
dima806/diamond_types_image_detection
Returns the diamond type for a given image with about 99% accuracy. See https://www.kaggle.com/code/dima806/diamond-types-image-detection-vit for details.

```
Classification report:

              precision    recall  f1-score   support

        pear     1.0000    0.9956    0.9978      2280
     cushion     0.9418    1.0000    0.9700      2280
    princess     0.9996    0.9890    0.9943      2279
       round     0.9964    0.9781    0.9872      2279
    marquise     0.9987    0.9882    0.9934      2279
        oval     0.9996    0.9904    0.9949      2280
     emerald     1.0000    0.9956    0.9978      2279
       heart     0.9987    0.9943    0.9965      2280

    accuracy                         0.9914     18236
   macro avg     0.9918    0.9914    0.9915     18236
weighted avg     0.9918    0.9914    0.9915     18236
```
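A per-class report like the one above could in principle be reproduced on a labeled test folder; the sketch below is illustrative only, and the `diamonds_test/<class>/*.jpg` directory layout is an assumption:

```python
from pathlib import Path

from sklearn.metrics import classification_report
from transformers import pipeline

clf = pipeline("image-classification", model="dima806/diamond_types_image_detection")

y_true, y_pred = [], []
for path in Path("diamonds_test").glob("*/*.jpg"):  # assumed <class>/<image>.jpg layout
    y_true.append(path.parent.name)
    y_pred.append(clf(str(path), top_k=1)[0]["label"])

print(classification_report(y_true, y_pred, digits=4))
```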
[ "pear", "cushion", "princess", "round", "marquise", "oval", "emerald", "heart" ]
dima806/67_cat_breeds_image_detection
See https://www.kaggle.com/code/dima806/67-cat-breed-image-detection-vit for more details.
[ "nebelung", "canadian hairless", "oriental tabby", "russian blue", "balinese", "domestic long hair", "tiger", "chinchilla", "domestic short hair", "domestic medium hair", "devon rex", "burmese", "birman", "bengal", "tonkinese", "american shorthair", "torbie", "munchkin", "turkish van", "ragamuffin", "calico", "himalayan", "selkirk rex", "silver", "somali", "tuxedo", "abyssinian", "british shorthair", "american curl", "siberian", "applehead siamese", "tortoiseshell", "cymric", "dilute calico", "pixiebob", "singapura", "havana", "dilute tortoiseshell", "ocicat", "burmilla", "snowshoe", "sphynx - hairless cat", "cornish rex", "american bobtail", "chausie", "persian", "javanese", "tabby", "korat", "exotic shorthair", "american wirehair", "siamese", "egyptian mau", "japanese bobtail", "york chocolate", "norwegian forest cat", "scottish fold", "manx", "oriental short hair", "oriental long hair", "turkish angora", "laperm", "chartreux", "extra-toes cat - hemingway polydactyl", "ragdoll", "bombay", "maine coon" ]
juniorjukeko/img_class_beans
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # img_class_beans This model is a fine-tuned version of [juniorjukeko/img_class_beans](https://huggingface.co/juniorjukeko/img_class_beans) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.1220 - Accuracy: 0.9730 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 143 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1122 | 0.98 | 12 | 0.1660 | 0.9575 | | 0.1356 | 1.96 | 24 | 0.1977 | 0.9421 | | 0.0889 | 2.94 | 36 | 0.1504 | 0.9421 | | 0.0793 | 4.0 | 49 | 0.1477 | 0.9575 | | 0.0578 | 4.98 | 61 | 0.1320 | 0.9653 | | 0.0514 | 5.96 | 73 | 0.1163 | 0.9730 | | 0.0592 | 6.94 | 85 | 0.1363 | 0.9653 | | 0.0532 | 8.0 | 98 | 0.1837 | 0.9382 | | 0.0555 | 8.98 | 110 | 0.1400 | 0.9614 | | 0.0723 | 9.8 | 120 | 0.1220 | 0.9730 | ### Framework versions - Transformers 4.33.2 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
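Because the card names the public `beans` dataset, a quick sanity check against its validation split can be sketched as follows (illustrative only, not part of the original card):

```python
from datasets import load_dataset
from transformers import pipeline

clf = pipeline("image-classification", model="juniorjukeko/img_class_beans")

sample = load_dataset("beans", split="validation")[0]
print("predicted:", clf(sample["image"])[0]["label"])
print("actual id:", sample["labels"])  # integer class id from the dataset
```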
[ "angular_leaf_spot", "bean_rust", "healthy" ]
kensvin/image_classification
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # image_classification This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the food101 dataset. It achieves the following results on the evaluation set: - Loss: 1.5938 - Accuracy: 0.911 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 2.7307 | 0.99 | 62 | 2.5306 | 0.833 | | 1.8698 | 2.0 | 125 | 1.7637 | 0.903 | | 1.5629 | 2.98 | 186 | 1.5856 | 0.915 | ### Framework versions - Transformers 4.33.1 - Pytorch 1.13.1+cu117 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "apple_pie", "baby_back_ribs", "bruschetta", "waffles", "caesar_salad", "cannoli", "caprese_salad", "carrot_cake", "ceviche", "cheesecake", "cheese_plate", "chicken_curry", "chicken_quesadilla", "baklava", "chicken_wings", "chocolate_cake", "chocolate_mousse", "churros", "clam_chowder", "club_sandwich", "crab_cakes", "creme_brulee", "croque_madame", "cup_cakes", "beef_carpaccio", "deviled_eggs", "donuts", "dumplings", "edamame", "eggs_benedict", "escargots", "falafel", "filet_mignon", "fish_and_chips", "foie_gras", "beef_tartare", "french_fries", "french_onion_soup", "french_toast", "fried_calamari", "fried_rice", "frozen_yogurt", "garlic_bread", "gnocchi", "greek_salad", "grilled_cheese_sandwich", "beet_salad", "grilled_salmon", "guacamole", "gyoza", "hamburger", "hot_and_sour_soup", "hot_dog", "huevos_rancheros", "hummus", "ice_cream", "lasagna", "beignets", "lobster_bisque", "lobster_roll_sandwich", "macaroni_and_cheese", "macarons", "miso_soup", "mussels", "nachos", "omelette", "onion_rings", "oysters", "bibimbap", "pad_thai", "paella", "pancakes", "panna_cotta", "peking_duck", "pho", "pizza", "pork_chop", "poutine", "prime_rib", "bread_pudding", "pulled_pork_sandwich", "ramen", "ravioli", "red_velvet_cake", "risotto", "samosa", "sashimi", "scallops", "seaweed_salad", "shrimp_and_grits", "breakfast_burrito", "spaghetti_bolognese", "spaghetti_carbonara", "spring_rolls", "steak", "strawberry_shortcake", "sushi", "tacos", "takoyaki", "tiramisu", "tuna_tartare" ]
Zekrom997/image_classification
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # image_classification This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the food101 dataset. It achieves the following results on the evaluation set: - Loss: 1.6302 - Accuracy: 0.883 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 2.7166 | 0.99 | 62 | 2.5345 | 0.842 | | 1.7982 | 2.0 | 125 | 1.7848 | 0.876 | | 1.5772 | 2.98 | 186 | 1.6252 | 0.894 | ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "apple_pie", "baby_back_ribs", "bruschetta", "waffles", "caesar_salad", "cannoli", "caprese_salad", "carrot_cake", "ceviche", "cheesecake", "cheese_plate", "chicken_curry", "chicken_quesadilla", "baklava", "chicken_wings", "chocolate_cake", "chocolate_mousse", "churros", "clam_chowder", "club_sandwich", "crab_cakes", "creme_brulee", "croque_madame", "cup_cakes", "beef_carpaccio", "deviled_eggs", "donuts", "dumplings", "edamame", "eggs_benedict", "escargots", "falafel", "filet_mignon", "fish_and_chips", "foie_gras", "beef_tartare", "french_fries", "french_onion_soup", "french_toast", "fried_calamari", "fried_rice", "frozen_yogurt", "garlic_bread", "gnocchi", "greek_salad", "grilled_cheese_sandwich", "beet_salad", "grilled_salmon", "guacamole", "gyoza", "hamburger", "hot_and_sour_soup", "hot_dog", "huevos_rancheros", "hummus", "ice_cream", "lasagna", "beignets", "lobster_bisque", "lobster_roll_sandwich", "macaroni_and_cheese", "macarons", "miso_soup", "mussels", "nachos", "omelette", "onion_rings", "oysters", "bibimbap", "pad_thai", "paella", "pancakes", "panna_cotta", "peking_duck", "pho", "pizza", "pork_chop", "poutine", "prime_rib", "bread_pudding", "pulled_pork_sandwich", "ramen", "ravioli", "red_velvet_cake", "risotto", "samosa", "sashimi", "scallops", "seaweed_salad", "shrimp_and_grits", "breakfast_burrito", "spaghetti_bolognese", "spaghetti_carbonara", "spring_rolls", "steak", "strawberry_shortcake", "sushi", "tacos", "takoyaki", "tiramisu", "tuna_tartare" ]
yaboidimsum/image_classification
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # image_classification This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 1.9011 - Accuracy: 0.9605 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 1.0 | 40 | 1.8906 | 0.9605 | | No log | 2.0 | 80 | 1.6868 | 0.9605 | | No log | 3.0 | 120 | 1.6471 | 0.9605 | ### Framework versions - Transformers 4.33.2 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "anger", "contempt", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
octava/image_classification
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # image_classification This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 1.6432 - Accuracy: 0.3688 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 1.0 | 40 | 1.8982 | 0.3 | | No log | 2.0 | 80 | 1.6882 | 0.3438 | | No log | 3.0 | 120 | 1.6481 | 0.3812 | ### Framework versions - Transformers 4.33.2 - Pytorch 2.2.0.dev20230906 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "anger", "contempt", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
RickyIG/image_classification
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # image_classification This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the food101 dataset. It achieves the following results on the evaluation set: - Loss: 1.6283 - Accuracy: 0.886 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 2.7254 | 0.99 | 62 | 2.5418 | 0.819 | | 1.8131 | 2.0 | 125 | 1.8025 | 0.852 | | 1.5991 | 2.98 | 186 | 1.6367 | 0.889 | ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "apple_pie", "baby_back_ribs", "bruschetta", "waffles", "caesar_salad", "cannoli", "caprese_salad", "carrot_cake", "ceviche", "cheesecake", "cheese_plate", "chicken_curry", "chicken_quesadilla", "baklava", "chicken_wings", "chocolate_cake", "chocolate_mousse", "churros", "clam_chowder", "club_sandwich", "crab_cakes", "creme_brulee", "croque_madame", "cup_cakes", "beef_carpaccio", "deviled_eggs", "donuts", "dumplings", "edamame", "eggs_benedict", "escargots", "falafel", "filet_mignon", "fish_and_chips", "foie_gras", "beef_tartare", "french_fries", "french_onion_soup", "french_toast", "fried_calamari", "fried_rice", "frozen_yogurt", "garlic_bread", "gnocchi", "greek_salad", "grilled_cheese_sandwich", "beet_salad", "grilled_salmon", "guacamole", "gyoza", "hamburger", "hot_and_sour_soup", "hot_dog", "huevos_rancheros", "hummus", "ice_cream", "lasagna", "beignets", "lobster_bisque", "lobster_roll_sandwich", "macaroni_and_cheese", "macarons", "miso_soup", "mussels", "nachos", "omelette", "onion_rings", "oysters", "bibimbap", "pad_thai", "paella", "pancakes", "panna_cotta", "peking_duck", "pho", "pizza", "pork_chop", "poutine", "prime_rib", "bread_pudding", "pulled_pork_sandwich", "ramen", "ravioli", "red_velvet_cake", "risotto", "samosa", "sashimi", "scallops", "seaweed_salad", "shrimp_and_grits", "breakfast_burrito", "spaghetti_bolognese", "spaghetti_carbonara", "spring_rolls", "steak", "strawberry_shortcake", "sushi", "tacos", "takoyaki", "tiramisu", "tuna_tartare" ]
thezeivier/test_grietas_100
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # test_grietas_100 This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.0018 - Accuracy: 0.5833 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 80 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 320 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 1.0 | 1 | 1.1055 | 0.3 | | No log | 2.0 | 3 | 1.0141 | 0.6333 | | No log | 3.0 | 5 | 1.0018 | 0.5833 | ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "sano", "fisura", "grieta" ]
thezeivier/Grietas_10k
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Grietas_10k This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.2970 - Accuracy: 0.8787 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 80 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 320 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.3932 | 1.0 | 75 | 0.3814 | 0.849 | | 0.334 | 2.0 | 150 | 0.3357 | 0.8652 | | 0.3247 | 3.0 | 225 | 0.2965 | 0.8832 | | 0.3037 | 4.0 | 300 | 0.2992 | 0.8783 | | 0.2765 | 5.0 | 375 | 0.2970 | 0.8787 | ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
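A minimal sketch of loading this checkpoint from the Hub for inference (the image path is hypothetical):

```python
from transformers import pipeline

# Load the fine-tuned crack classifier from the Hub and score a photo.
classifier = pipeline("image-classification", model="thezeivier/Grietas_10k")

# "wall.jpg" is a hypothetical local file; a URL also works.
for pred in classifier("wall.jpg"):
    print(pred["label"], round(pred["score"], 3))  # sano / fisura / grieta
```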
[ "sano", "fisura", "grieta" ]
acaballero76/vit-model
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-model This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.0682 - Accuracy: 0.9850 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.148 | 3.85 | 500 | 0.0682 | 0.9850 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "angular_leaf_spot", "bean_rust", "healthy" ]
dima806/asl_alphabet_image_detection
See https://www.kaggle.com/code/dima806/asl-alphabet-signs-detection-vit for more details.
[ "h", "z", "d", "x", "k", "v", "i", "j", "t", "w", "e", "m", "g", "nothing", "del", "c", "f", "l", "u", "o", "space", "y", "a", "b", "r", "q", "n", "s", "p" ]
sgenzer/seven-segment-led
# Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 88543143697 - CO2 Emissions (in grams): 0.6090 ## Validation Metrics - Loss: 0.503 - Accuracy: 0.868 - Macro F1: 0.862 - Micro F1: 0.868 - Weighted F1: 0.871 - Macro Precision: 0.871 - Micro Precision: 0.868 - Weighted Precision: 0.883 - Macro Recall: 0.862 - Micro Recall: 0.868 - Weighted Recall: 0.868
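Macro F1 averages the per-class F1 scores equally, micro F1 pools true/false positives across all classes, and weighted F1 weights each class's F1 by its support, which is why the three values above can differ. A toy illustration with scikit-learn (the labels below are made up, not the model's validation data):

```python
from sklearn.metrics import f1_score

# Made-up predictions over the ten digit classes, purely to show how the
# averaging modes reported above are computed.
y_true = [0, 1, 1, 2, 3, 3, 5, 8, 8, 9]
y_pred = [0, 1, 7, 2, 3, 5, 5, 8, 8, 9]

print(f1_score(y_true, y_pred, average="macro"))     # mean of per-class F1
print(f1_score(y_true, y_pred, average="micro"))     # pooled TP/FP/FN
print(f1_score(y_true, y_pred, average="weighted"))  # support-weighted mean
```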
[ "0", "1", "2", "3", "4", "5", "6", "7", "8", "9" ]
Saul98lm/prueba
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # prueba This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.0232 - Accuracy: 0.9850 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1427 | 3.85 | 500 | 0.0232 | 0.9850 | ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "angular_leaf_spot", "bean_rust", "healthy" ]
Monti06/vit-model
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-model This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.0114 - Accuracy: 1.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1425 | 3.85 | 500 | 0.0114 | 1.0 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "angular_leaf_spot", "bean_rust", "healthy" ]
isanchez/vit-model
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-model This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.0363 - Accuracy: 0.9850 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1392 | 3.85 | 500 | 0.0363 | 0.9850 | ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "angular_leaf_spot", "bean_rust", "healthy" ]
thezeivier/Grietas_10k-Fine-tuning
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Grietas_10k-Fine-tuning This model is a fine-tuned version of [thezeivier/Grietas_10k](https://huggingface.co/thezeivier/Grietas_10k) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.3864 - Accuracy: 0.8860 ## Model description More information needed ## Intended uses & limitations This model is designed to classify images of infrastructure into three categories: - Sano (healthy: no damage to the concrete structure). - Fisura (fissure: minor, insignificant damage to the concrete structure). - Grieta (crack: severe, high-risk damage to the concrete structure). This computer-vision model can be a valuable tool for identifying potential collapse threats in concrete structures in the event of future earthquakes. Limitations: the model was trained exclusively on images of the three categories listed above and does not incorporate information about the distance between the camera and the crack captured in the image. ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 80 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 320 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 100 These hyperparameters can be set in the training configuration to replicate the same training conditions. Each hyperparameter affects how the model fits the data and can influence its performance and training speed, so they should be chosen carefully.
### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 0.8 | 2 | 1.3737 | 0.3679 | | No log | 2.0 | 5 | 1.0234 | 0.6218 | | No log | 2.8 | 7 | 0.8146 | 0.7254 | | 1.0488 | 4.0 | 10 | 0.6621 | 0.7772 | | 1.0488 | 4.8 | 12 | 0.6295 | 0.8031 | | 1.0488 | 6.0 | 15 | 0.5390 | 0.8083 | | 1.0488 | 6.8 | 17 | 0.4902 | 0.8290 | | 0.4981 | 8.0 | 20 | 0.4645 | 0.8290 | | 0.4981 | 8.8 | 22 | 0.4484 | 0.8497 | | 0.4981 | 10.0 | 25 | 0.4543 | 0.8446 | | 0.4981 | 10.8 | 27 | 0.4325 | 0.8394 | | 0.3669 | 12.0 | 30 | 0.4210 | 0.8497 | | 0.3669 | 12.8 | 32 | 0.4303 | 0.8342 | | 0.3669 | 14.0 | 35 | 0.4170 | 0.8497 | | 0.3669 | 14.8 | 37 | 0.3861 | 0.8601 | | 0.2811 | 16.0 | 40 | 0.3629 | 0.8705 | | 0.2811 | 16.8 | 42 | 0.3982 | 0.8653 | | 0.2811 | 18.0 | 45 | 0.4492 | 0.8290 | | 0.2811 | 18.8 | 47 | 0.4216 | 0.8342 | | 0.2026 | 20.0 | 50 | 0.4614 | 0.8394 | | 0.2026 | 20.8 | 52 | 0.4325 | 0.8446 | | 0.2026 | 22.0 | 55 | 0.4755 | 0.8342 | | 0.2026 | 22.8 | 57 | 0.4175 | 0.8394 | | 0.1709 | 24.0 | 60 | 0.4175 | 0.8497 | | 0.1709 | 24.8 | 62 | 0.4105 | 0.8446 | | 0.1709 | 26.0 | 65 | 0.4140 | 0.8601 | | 0.1709 | 26.8 | 67 | 0.4641 | 0.8394 | | 0.1293 | 28.0 | 70 | 0.4214 | 0.8394 | | 0.1293 | 28.8 | 72 | 0.3802 | 0.8808 | | 0.1293 | 30.0 | 75 | 0.4875 | 0.8290 | | 0.1293 | 30.8 | 77 | 0.3972 | 0.8705 | | 0.1167 | 32.0 | 80 | 0.4853 | 0.8394 | | 0.1167 | 32.8 | 82 | 0.4082 | 0.8549 | | 0.1167 | 34.0 | 85 | 0.3917 | 0.8601 | | 0.1167 | 34.8 | 87 | 0.3573 | 0.8653 | | 0.1034 | 36.0 | 90 | 0.4312 | 0.8497 | | 0.1034 | 36.8 | 92 | 0.4035 | 0.8497 | | 0.1034 | 38.0 | 95 | 0.4413 | 0.8238 | | 0.1034 | 38.8 | 97 | 0.4728 | 0.8446 | | 0.0782 | 40.0 | 100 | 0.3977 | 0.8808 | | 0.0782 | 40.8 | 102 | 0.3449 | 0.8912 | | 0.0782 | 42.0 | 105 | 0.4146 | 0.8808 | | 0.0782 | 42.8 | 107 | 0.4380 | 0.8601 | | 0.083 | 44.0 | 110 | 0.4579 | 0.8497 | | 0.083 | 44.8 | 112 | 0.5234 | 0.8549 | | 0.083 | 46.0 | 115 | 0.4053 | 0.8756 | | 0.083 | 46.8 | 117 | 0.4724 | 0.8394 | | 0.0741 | 48.0 | 120 | 0.4631 | 0.8549 | | 0.0741 | 48.8 | 122 | 0.4351 | 0.8653 | | 0.0741 | 50.0 | 125 | 0.4191 | 0.8756 | | 0.0741 | 50.8 | 127 | 0.3772 | 0.8964 | | 0.067 | 52.0 | 130 | 0.3960 | 0.8808 | | 0.067 | 52.8 | 132 | 0.3749 | 0.8964 | | 0.067 | 54.0 | 135 | 0.4395 | 0.8653 | | 0.067 | 54.8 | 137 | 0.5284 | 0.8342 | | 0.0632 | 56.0 | 140 | 0.3332 | 0.8808 | | 0.0632 | 56.8 | 142 | 0.4342 | 0.8497 | | 0.0632 | 58.0 | 145 | 0.3986 | 0.8756 | | 0.0632 | 58.8 | 147 | 0.4771 | 0.8549 | | 0.063 | 60.0 | 150 | 0.4505 | 0.8497 | | 0.063 | 60.8 | 152 | 0.4023 | 0.8653 | | 0.063 | 62.0 | 155 | 0.5208 | 0.8290 | | 0.063 | 62.8 | 157 | 0.4915 | 0.8601 | | 0.0571 | 64.0 | 160 | 0.4412 | 0.8756 | | 0.0571 | 64.8 | 162 | 0.4554 | 0.8653 | | 0.0571 | 66.0 | 165 | 0.4318 | 0.8653 | | 0.0571 | 66.8 | 167 | 0.4317 | 0.8549 | | 0.0608 | 68.0 | 170 | 0.4509 | 0.8653 | | 0.0608 | 68.8 | 172 | 0.4176 | 0.8705 | | 0.0608 | 70.0 | 175 | 0.5203 | 0.8394 | | 0.0608 | 70.8 | 177 | 0.4375 | 0.8756 | | 0.0478 | 72.0 | 180 | 0.4196 | 0.8601 | | 0.0478 | 72.8 | 182 | 0.4744 | 0.8601 | | 0.0478 | 74.0 | 185 | 0.4362 | 0.8808 | | 0.0478 | 74.8 | 187 | 0.4804 | 0.8653 | | 0.0519 | 76.0 | 190 | 0.4861 | 0.8446 | | 0.0519 | 76.8 | 192 | 0.4605 | 0.8601 | | 0.0519 | 78.0 | 195 | 0.4730 | 0.8394 | | 0.0519 | 78.8 | 197 | 0.4650 | 0.8705 | | 0.0553 | 80.0 | 200 | 0.3864 | 0.8860 | ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
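This card continues training from an already fine-tuned checkpoint rather than from the base ViT. A minimal sketch of that setup, assuming the standard `transformers` auto classes (dataset and `Trainer` wiring omitted):

```python
from transformers import AutoImageProcessor, AutoModelForImageClassification

# Start from the previously fine-tuned checkpoint, as this card does.
processor = AutoImageProcessor.from_pretrained("thezeivier/Grietas_10k")
model = AutoModelForImageClassification.from_pretrained("thezeivier/Grietas_10k")
# model.config.id2label maps indices to the three classes listed below.
```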
[ "sano", "fisura", "grieta" ]
Andru/swin-tiny-patch4-window7-224-finetuned-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-eurosat This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.1772 - Accuracy: 0.9359 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.2447 | 0.99 | 98 | 0.1806 | 0.9323 | | 0.1986 | 1.99 | 197 | 0.1772 | 0.9359 | | 0.1933 | 2.98 | 294 | 0.1772 | 0.9359 | ### Framework versions - Transformers 4.30.0 - Pytorch 2.2.1+cu121 - Datasets 2.18.0 - Tokenizers 0.13.3
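Several of these cards train on "the imagefolder dataset"; this refers to the generic `datasets` image-folder builder, which infers class labels from directory names. A small sketch with a hypothetical directory layout:

```python
from datasets import load_dataset

# Hypothetical layout: data/train/<class_name>/*.jpg
# The builder turns each subdirectory name into a class label.
ds = load_dataset("imagefolder", data_dir="data")
print(ds["train"].features["label"].names)
```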
[ "buildings", "forest", "glacier", "mountain", "sea", "street" ]
daliapv/vit-model
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-model This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.0623 - Accuracy: 0.9774 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1262 | 3.85 | 500 | 0.0623 | 0.9774 | ### Framework versions - Transformers 4.30.2 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "angular_leaf_spot", "bean_rust", "healthy" ]
ArturoGL/vit-model
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-model This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.0243 - Accuracy: 0.9925 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.0484 | 3.85 | 500 | 0.0243 | 0.9925 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "angular_leaf_spot", "bean_rust", "healthy" ]
imamassi/Visual_Emotional_Analysis
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Visual_Emotional_Analysis This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 1.6616 - Accuracy: 0.4437 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.7478 | 1.0 | 10 | 1.8034 | 0.4437 | | 1.6809 | 2.0 | 20 | 1.6970 | 0.4437 | | 1.616 | 3.0 | 30 | 1.6712 | 0.4625 | ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "anger", "contempt", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
ditobagus/image_classification
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # image_classification This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 4.6845 - Accuracy: 0.0626 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.6177 | 1.0 | 788 | 4.5441 | 0.0572 | | 0.6328 | 2.0 | 1576 | 4.6145 | 0.0628 | | 0.5851 | 3.0 | 2364 | 4.6799 | 0.0648 | ### Framework versions - Transformers 4.33.2 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "calling", "clapping", "running", "sitting", "sleeping", "texting", "using_laptop", "cycling", "dancing", "drinking", "eating", "fighting", "hugging", "laughing", "listening_to_music" ]
Abhiram4/swinv2-tiny-patch4-window8-256-finetuned-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swinv2-tiny-patch4-window8-256-finetuned-eurosat This model is a fine-tuned version of [microsoft/swinv2-tiny-patch4-window8-256](https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.0482 - Accuracy: 0.983 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 450 - eval_batch_size: 450 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 1800 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.3991 | 1.0 | 46 | 0.2074 | 0.933 | | 0.1629 | 2.0 | 92 | 0.0946 | 0.971 | | 0.1294 | 3.0 | 138 | 0.0692 | 0.977 | | 0.1164 | 4.0 | 184 | 0.0572 | 0.982 | | 0.1028 | 5.0 | 230 | 0.0494 | 0.984 | | 0.0893 | 6.0 | 276 | 0.0487 | 0.982 | | 0.0843 | 7.0 | 322 | 0.0472 | 0.984 | | 0.0805 | 8.0 | 368 | 0.0437 | 0.983 | | 0.0705 | 9.0 | 414 | 0.0523 | 0.982 | | 0.0712 | 10.0 | 460 | 0.0482 | 0.983 | ### Framework versions - Transformers 4.35.0 - Pytorch 2.1.0+cu121 - Datasets 2.14.6 - Tokenizers 0.14.1
[ "cnv", "dme", "drusen", "normal" ]
dini-r-a/image_age_classification
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # image_age_classification This model is a fine-tuned version of [nateraw/vit-age-classifier](https://huggingface.co/nateraw/vit-age-classifier) on the fair_face dataset. It achieves the following results on the evaluation set: - Loss: 0.9464 - Accuracy: 0.601 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.9107 | 1.0 | 125 | 0.9360 | 0.6065 | | 0.7945 | 2.0 | 250 | 0.9545 | 0.588 | | 1.0256 | 3.0 | 375 | 1.0144 | 0.586 | | 0.7354 | 4.0 | 500 | 0.9726 | 0.594 | | 0.6979 | 5.0 | 625 | 0.9735 | 0.5995 | ### Framework versions - Transformers 4.34.0.dev0 - Pytorch 1.12.1+cu116 - Datasets 2.14.5 - Tokenizers 0.12.1
[ "0-2", "3-9", "10-19", "20-29", "30-39", "40-49", "50-59", "60-69", "more than 70" ]
touchtech/fashion-images-pack-types-vit-large-patch16-224-in21k-v3
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # fashion-images-pack-types-vit-large-patch16-224-in21k-v3 This model is a fine-tuned version of [google/vit-large-patch16-224-in21k](https://huggingface.co/google/vit-large-patch16-224-in21k) on the touchtech/fashion-images-pack-types dataset. It achieves the following results on the evaluation set: - Loss: 0.0485 - Accuracy: 0.9908 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 1337 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1662 | 1.0 | 1696 | 0.1074 | 0.9737 | | 0.116 | 2.0 | 3392 | 0.0545 | 0.9816 | | 0.091 | 3.0 | 5088 | 0.0647 | 0.9850 | | 0.0601 | 4.0 | 6784 | 0.0502 | 0.9887 | | 0.0438 | 5.0 | 8480 | 0.0485 | 0.9908 | ### Framework versions - Transformers 4.33.0.dev0 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "model", "pack-accessory", "pack-top-l3", "pack-bottom-l0", "pack-bottom-l1", "pack-multi", "pack-one-piece-l1", "pack-shoes", "pack-top-bottom", "pack-top-l1", "pack-top-l2" ]
RobVilchis/vit-model-rob-vilchis
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-model-rob-vilchis This model is a fine-tuned version of [google/vit-base-patch32-224-in21k](https://huggingface.co/google/vit-base-patch32-224-in21k) on the snacks dataset. It achieves the following results on the evaluation set: - Loss: 0.5765 - Accuracy: 0.8607 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.2646 | 0.83 | 500 | 0.9471 | 0.7361 | | 0.4485 | 1.65 | 1000 | 0.6931 | 0.8084 | | 0.179 | 2.48 | 1500 | 0.7448 | 0.8157 | | 0.052 | 3.31 | 2000 | 0.5765 | 0.8607 | ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "apple", "banana", "juice", "muffin", "orange", "pineapple", "popcorn", "pretzel", "salad", "strawberry", "waffle", "watermelon", "cake", "candy", "carrot", "cookie", "doughnut", "grape", "hot dog", "ice cream" ]
touchtech/fashion-images-gender-age-vit-large-patch16-224-in21k-v3
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # fashion-images-gender-age-vit-large-patch16-224-in21k-v3 This model is a fine-tuned version of [google/vit-large-patch16-224-in21k](https://huggingface.co/google/vit-large-patch16-224-in21k) on the touchtech/fashion-images-gender-age dataset. It achieves the following results on the evaluation set: - Loss: 0.0223 - Accuracy: 0.9960 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 1337 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 0.1868 | 1.0 | 2457 | 0.0547 | 0.9853 | | 0.1209 | 2.0 | 4914 | 0.0401 | 0.9888 | | 0.1027 | 3.0 | 7371 | 0.0262 | 0.9937 | | 0.0654 | 4.0 | 9828 | 0.0223 | 0.9960 | | 0.0542 | 5.0 | 12285 | 0.0273 | 0.9948 | ### Framework versions - Transformers 4.33.0.dev0 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "model-female-adult", "model-female-child", "model-male-adult", "model-male-child", "pack" ]
touchtech/fashion-images-perspectives-vit-large-patch16-224-in21k-v3
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # fashion-images-perspectives-vit-large-patch16-224-in21k-v3 This model is a fine-tuned version of [google/vit-large-patch16-224-in21k](https://huggingface.co/google/vit-large-patch16-224-in21k) on the touchtech/fashion-images-perspectives dataset. It achieves the following results on the evaluation set: - Loss: 0.2419 - Accuracy: 0.9246 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 1337 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 0.4353 | 1.0 | 3070 | 0.3395 | 0.8812 | | 0.3415 | 2.0 | 6140 | 0.2544 | 0.9192 | | 0.2689 | 3.0 | 9210 | 0.2419 | 0.9246 | | 0.2525 | 4.0 | 12280 | 0.2953 | 0.9192 | | 0.1977 | 5.0 | 15350 | 0.2444 | 0.9356 | ### Framework versions - Transformers 4.33.0.dev0 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "model-back-close", "model-back-full", "pack-detail", "pack-front", "pack-side", "pack-top", "model-detail", "model-front-close", "model-front-full", "model-side-close", "model-side-full", "pack-angled", "pack-back", "pack-bottom" ]
eitoi/elk-deer
<!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # eitoi/elk-deer This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.3677 - Validation Loss: 0.2980 - Train Accuracy: 1.0 - Epoch: 4 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'module': 'keras.optimizers.schedules', 'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 3e-05, 'decay_steps': 260, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, 'registered_name': None}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Train Accuracy | Epoch | |:----------:|:---------------:|:--------------:|:-----:| | 0.6723 | 0.6107 | 1.0 | 0 | | 0.5838 | 0.5270 | 1.0 | 1 | | 0.5188 | 0.4509 | 1.0 | 2 | | 0.4333 | 0.3759 | 1.0 | 3 | | 0.3677 | 0.2980 | 1.0 | 4 | ### Framework versions - Transformers 4.33.1 - TensorFlow 2.13.0 - Datasets 2.14.5 - Tokenizers 0.13.3
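The serialized optimizer dictionary above corresponds to AdamWeightDecay driving a linear (power=1.0) PolynomialDecay from 3e-05 to 0 over 260 steps with weight_decay_rate=0.01. A sketch of rebuilding it with the `transformers` TF helper (an assumption about tooling; the author's exact code is not shown):

```python
from transformers import create_optimizer

# AdamWeightDecay + linear decay from 3e-05 to 0 over 260 steps, no warmup,
# weight_decay_rate=0.01, matching the serialized config above.
optimizer, lr_schedule = create_optimizer(
    init_lr=3e-5,
    num_train_steps=260,
    num_warmup_steps=0,
    weight_decay_rate=0.01,
)
```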
[ "neg", "pos" ]
dennisjooo/emotion_classification
# Emotion Classification This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the [FastJobs/Visual_Emotional_Analysis](https://huggingface.co/datasets/FastJobs/Visual_Emotional_Analysis) dataset. In theory, the accuracy of a random guess on this dataset is 0.125 (choosing one of 8 labels at random). It achieves the following results on the evaluation set: - Loss: 1.0511 - Accuracy: 0.6687 - Precision: 0.7104 - F1: 0.6713 ## Model description The Vision Transformer base version trained on ImageNet-21K, released by Google. Further details can be found in their [repo](https://huggingface.co/google/vit-base-patch16-224-in21k). ## Training and evaluation data ### Data Split Trained on the [FastJobs/Visual_Emotional_Analysis](https://huggingface.co/datasets/FastJobs/Visual_Emotional_Analysis) dataset. Used a 4:1 ratio for the training and development sets and a random seed of 42. A seed of 42 was also used for batching the data, independently of the split seed. ### Pre-processing Augmentation The main pre-processing phase for both training and evaluation includes: - Bilinear interpolation to resize the image to (224, 224, 3), since the original model was trained on ImageNet images - Normalizing images using a mean and standard deviation of [0.5, 0.5, 0.5], just like the original model Other than the aforementioned pre-processing, the training set was augmented using: - Random horizontal & vertical flip - Color jitter - Random resized crop ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine_with_restarts - lr_scheduler_warmup_steps: 150 - num_epochs: 300 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:| | 2.079 | 1.0 | 10 | 2.0895 | 0.0563 | 0.0604 | 0.0521 | | 2.0789 | 2.0 | 20 | 2.0851 | 0.0563 | 0.0602 | 0.0529 | | 2.0717 | 3.0 | 30 | 2.0773 | 0.0813 | 0.0858 | 0.0783 | | 2.0613 | 4.0 | 40 | 2.0658 | 0.125 | 0.1997 | 0.1333 | | 2.0445 | 5.0 | 50 | 2.0483 | 0.1875 | 0.2569 | 0.1934 | | 2.0176 | 6.0 | 60 | 2.0206 | 0.2313 | 0.2692 | 0.2384 | | 1.9894 | 7.0 | 70 | 1.9763 | 0.3063 | 0.3033 | 0.2983 | | 1.9232 | 8.0 | 80 | 1.8912 | 0.3625 | 0.3307 | 0.3194 | | 1.8256 | 9.0 | 90 | 1.7775 | 0.4062 | 0.3531 | 0.3600 | | 1.732 | 10.0 | 100 | 1.6580 | 0.4688 | 0.4158 | 0.4133 | | 1.6406 | 11.0 | 110 | 1.5597 | 0.5 | 0.4358 | 0.4370 | | 1.5584 | 12.0 | 120 | 1.4855 | 0.5125 | 0.4792 | 0.4784 | | 1.4898 | 13.0 | 130 | 1.4248 | 0.5437 | 0.5011 | 0.5098 | | 1.4216 | 14.0 | 140 | 1.3692 | 0.5687 | 0.5255 | 0.5289 | | 1.3701 | 15.0 | 150 | 1.3158 | 0.5687 | 0.5346 | 0.5360 | | 1.3438 | 16.0 | 160 | 1.2842 | 0.5437 | 0.5451 | 0.5098 | | 1.2799 | 17.0 | 170 | 1.2620 | 0.5625 | 0.5169 | 0.5194 | | 1.2481 | 18.0 | 180 | 1.2321 | 0.5938 | 0.6003 | 0.5811 | | 1.1993 | 19.0 | 190 | 1.2108 | 0.5687 | 0.5640 | 0.5412 | | 1.1599 | 20.0 | 200 | 1.1853 | 0.55 | 0.5434 | 0.5259 | | 1.1087 | 21.0 | 210 | 1.1839 | 0.5563 | 0.5670 | 0.5380 | | 1.0757 | 22.0 | 220 | 1.1905 | 0.55 | 0.5682 | 0.5308 | | 0.9985 | 23.0 | 230 | 1.1509 | 0.6375 | 0.6714 | 0.6287 | | 0.9776 | 24.0 | 240 | 1.1048 | 0.6188 | 0.6222 | 0.6127 | | 0.9331 | 25.0 | 250 | 1.1196 | 0.6125 | 0.6345 | 0.6072 | | 0.8887 | 26.0 | 260 | 1.1424 | 0.5938 | 0.6174 | 0.5867 | | 0.879 | 27.0 | 270 | 1.1232 | 0.6062 | 0.6342 | 0.5978 | | 0.8369 | 28.0 | 280 | 1.1172 | 0.6 | 0.6480 | 0.5865 | | 0.7864 | 29.0 | 290 | 1.1285 | 0.5938 | 0.6819 | 0.5763 | | 0.7775 | 30.0 | 300 | 1.0511 | 0.6687 | 0.7104 | 0.6713 | | 0.7281 | 31.0 | 310 | 1.0295 | 0.6562 | 0.6596 | 0.6514 | | 0.7348 | 32.0 | 320 | 1.0398 | 0.6375 | 0.6353 | 0.6319 | | 0.6896 | 33.0 | 330 | 1.0729 | 0.6062 | 0.6205 | 0.6062 | | 0.613 | 34.0 | 340 | 1.0505 | 0.6438 | 0.6595 | 0.6421 | | 0.6034 | 35.0 | 350 | 1.0827 | 0.6375 | 0.6593 | 0.6376 | | 0.6236 | 36.0 | 360 | 1.1271 | 0.6125 | 0.6238 | 0.6087 | | 0.5607 | 37.0 | 370 | 1.0985 | 0.6062 | 0.6254 | 0.6015 | | 0.5835 | 38.0 | 380 | 1.0791 | 0.6375 | 0.6624 | 0.6370 | | 0.5889 | 39.0 | 390 | 1.1300 | 0.6062 | 0.6529 | 0.6092 | | 0.5137 | 40.0 | 400 | 1.1062 | 0.625 | 0.6457 | 0.6226 | | 0.4804 | 41.0 | 410 | 1.1452 | 0.6188 | 0.6403 | 0.6158 | | 0.4811 | 42.0 | 420 | 1.1271 | 0.6375 | 0.6478 | 0.6347 | | 0.5179 | 43.0 | 430 | 1.1942 | 0.5875 | 0.6185 | 0.5874 | | 0.4744 | 44.0 | 440 | 1.1515 | 0.6125 | 0.6329 | 0.6160 | | 0.4327 | 45.0 | 450 | 1.1321 | 0.6375 | 0.6669 | 0.6412 | | 0.4565 | 46.0 | 460 | 1.1742 | 0.625 | 0.6478 | 0.6251 | | 0.4006 | 47.0 | 470 | 1.1675 | 0.6062 | 0.6361 | 0.6079 | | 0.4541 | 48.0 | 480 | 1.1542 | 0.6125 | 0.6404 | 0.6152 | | 0.3689 | 49.0 | 490 | 1.2190 | 0.5875 | 0.6134 | 0.5896 | | 0.3794 | 50.0 | 500 | 1.2002 | 0.6062 | 0.6155 | 0.6005 | | 0.429 | 51.0 | 510 | 1.2904 | 0.575 | 0.6207 | 0.5849 | | 0.431 | 52.0 | 520 | 1.2416 | 0.5875 | 0.6028 | 0.5794 | | 0.3813 | 53.0 | 530 | 1.2073 | 0.6125 | 0.6449 | 0.6142 | | 0.365 | 54.0 | 540 | 1.2083 | 0.6062 | 0.6454 | 0.6075 | | 0.3714 | 55.0 | 550 | 1.1627 | 0.6375 | 0.6576 | 0.6390 | | 0.3393 | 56.0 | 560 | 1.1620 | 0.6438 | 0.6505 | 0.6389 | | 0.3676 | 57.0 | 570 | 1.1501 | 0.625 | 0.6294 | 0.6258 | | 0.3371 | 58.0 | 580 | 1.2779 | 0.5875 | 0.6000 | 0.5792 | | 0.3325 | 59.0 | 590 | 1.2719 | 0.575 | 0.5843 | 0.5651 | | 0.3509 | 60.0 | 600 | 1.2956 | 0.6 | 0.6422 | 0.6059 | ### Framework versions - Transformers 4.33.0 - Pytorch 2.0.0 - Datasets 2.1.0 - Tokenizers 0.13.3
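A sketch of the augmentation recipe described in the pre-processing section above, using torchvision (the jitter strengths and crop parameters are assumptions; the card does not state them):

```python
from torchvision import transforms

# Training-time augmentation per the card: random flips, color jitter, and a
# random resized crop to the ViT input size, then normalization with
# mean/std [0.5, 0.5, 0.5] as described above.
train_transforms = transforms.Compose([
    transforms.RandomResizedCrop(224, interpolation=transforms.InterpolationMode.BILINEAR),
    transforms.RandomHorizontalFlip(),
    transforms.RandomVerticalFlip(),
    transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),  # assumed strengths
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])
```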
[ "anger", "contempt", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
elenaThevalley/mobilenet_v2_1.0_224-finetuned-32bs-0.1lr
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mobilenet_v2_1.0_224-finetuned-32bs-0.1lr This model is a fine-tuned version of [google/mobilenet_v2_1.0_224](https://huggingface.co/google/mobilenet_v2_1.0_224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.9270 - Accuracy: 0.6469 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.1 - train_batch_size: 32 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 16 - total_train_batch_size: 512 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 0.99 | 53 | 1.3949 | 0.4049 | | No log | 1.99 | 107 | 1.0455 | 0.5819 | | No log | 2.96 | 159 | 0.9270 | 0.6469 | ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "drink", "food", "inside", "menu", "outside" ]
hansin91/image_classification
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # image_classification This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 1.2378 - Accuracy: 0.5875 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 1.0 | 40 | 2.0656 | 0.125 | | No log | 2.0 | 80 | 2.0558 | 0.1938 | | No log | 3.0 | 120 | 2.0177 | 0.2375 | | No log | 4.0 | 160 | 1.9156 | 0.3438 | | No log | 5.0 | 200 | 1.7849 | 0.3063 | | No log | 6.0 | 240 | 1.6961 | 0.3187 | | No log | 7.0 | 280 | 1.6026 | 0.3937 | | No log | 8.0 | 320 | 1.5455 | 0.3688 | | No log | 9.0 | 360 | 1.4723 | 0.4562 | | No log | 10.0 | 400 | 1.3931 | 0.5 | | No log | 11.0 | 440 | 1.4418 | 0.4375 | | No log | 12.0 | 480 | 1.3306 | 0.4437 | | 1.5855 | 13.0 | 520 | 1.2437 | 0.575 | | 1.5855 | 14.0 | 560 | 1.3712 | 0.4875 | | 1.5855 | 15.0 | 600 | 1.2102 | 0.55 | | 1.5855 | 16.0 | 640 | 1.3217 | 0.5188 | | 1.5855 | 17.0 | 680 | 1.3656 | 0.4938 | | 1.5855 | 18.0 | 720 | 1.3261 | 0.525 | | 1.5855 | 19.0 | 760 | 1.5611 | 0.4625 | | 1.5855 | 20.0 | 800 | 1.4503 | 0.5125 | ### Framework versions - Transformers 4.33.2 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "anger", "contempt", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
kensvin/emotion_classification
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # emotion_classification This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 1.2024 - Accuracy: 0.6062 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 1.0 | 10 | 1.3600 | 0.4938 | | No log | 2.0 | 20 | 1.2908 | 0.4938 | | No log | 3.0 | 30 | 1.2799 | 0.5 | | No log | 4.0 | 40 | 1.2110 | 0.5312 | | No log | 5.0 | 50 | 1.2178 | 0.5188 | | No log | 6.0 | 60 | 1.2189 | 0.5188 | | No log | 7.0 | 70 | 1.2566 | 0.5375 | | No log | 8.0 | 80 | 1.1838 | 0.5687 | | No log | 9.0 | 90 | 1.2730 | 0.55 | | No log | 10.0 | 100 | 1.2329 | 0.575 | | No log | 11.0 | 110 | 1.2224 | 0.5563 | | No log | 12.0 | 120 | 1.2729 | 0.5563 | | No log | 13.0 | 130 | 1.2678 | 0.5687 | | No log | 14.0 | 140 | 1.2423 | 0.5687 | | No log | 15.0 | 150 | 1.1704 | 0.6312 | | No log | 16.0 | 160 | 1.2925 | 0.5625 | | No log | 17.0 | 170 | 1.3557 | 0.5312 | | No log | 18.0 | 180 | 1.2951 | 0.5687 | | No log | 19.0 | 190 | 1.2594 | 0.5625 | | No log | 20.0 | 200 | 1.2463 | 0.5687 | ### Framework versions - Transformers 4.33.2 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "anger", "contempt", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
SeyedAli/Food-Image-Classification-VIT
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Food-Image-Classification-VIT This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the food101 dataset. It achieves the following results on the evaluation set: - eval_loss: 1.0611 - eval_accuracy: 0.7274 - eval_runtime: 411.0682 - eval_samples_per_second: 61.425 - eval_steps_per_second: 7.68 - epoch: 0.15 - step: 718 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Framework versions - Transformers 4.33.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
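The eval metrics above come from a partial run (epoch 0.15). A typical way such an accuracy figure is produced with the `evaluate` library, shown as a sketch rather than the author's exact code:

```python
import numpy as np
import evaluate

accuracy = evaluate.load("accuracy")

def compute_metrics(eval_pred):
    # Trainer passes (logits, labels); take the argmax class per example.
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    return accuracy.compute(predictions=predictions, references=labels)
```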
[ "apple_pie", "baby_back_ribs", "bruschetta", "waffles", "caesar_salad", "cannoli", "caprese_salad", "carrot_cake", "ceviche", "cheesecake", "cheese_plate", "chicken_curry", "chicken_quesadilla", "baklava", "chicken_wings", "chocolate_cake", "chocolate_mousse", "churros", "clam_chowder", "club_sandwich", "crab_cakes", "creme_brulee", "croque_madame", "cup_cakes", "beef_carpaccio", "deviled_eggs", "donuts", "dumplings", "edamame", "eggs_benedict", "escargots", "falafel", "filet_mignon", "fish_and_chips", "foie_gras", "beef_tartare", "french_fries", "french_onion_soup", "french_toast", "fried_calamari", "fried_rice", "frozen_yogurt", "garlic_bread", "gnocchi", "greek_salad", "grilled_cheese_sandwich", "beet_salad", "grilled_salmon", "guacamole", "gyoza", "hamburger", "hot_and_sour_soup", "hot_dog", "huevos_rancheros", "hummus", "ice_cream", "lasagna", "beignets", "lobster_bisque", "lobster_roll_sandwich", "macaroni_and_cheese", "macarons", "miso_soup", "mussels", "nachos", "omelette", "onion_rings", "oysters", "bibimbap", "pad_thai", "paella", "pancakes", "panna_cotta", "peking_duck", "pho", "pizza", "pork_chop", "poutine", "prime_rib", "bread_pudding", "pulled_pork_sandwich", "ramen", "ravioli", "red_velvet_cake", "risotto", "samosa", "sashimi", "scallops", "seaweed_salad", "shrimp_and_grits", "breakfast_burrito", "spaghetti_bolognese", "spaghetti_carbonara", "spring_rolls", "steak", "strawberry_shortcake", "sushi", "tacos", "takoyaki", "tiramisu", "tuna_tartare" ]
NabeelShar/emotion-dectect
---
license: apache-2.0
---

### Just Another Project
[ "angry", "happy", "neutral", "sad" ]
Sandra26/vit-model
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-model This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.0468 - Accuracy: 0.9925 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1624 | 3.85 | 500 | 0.0468 | 0.9925 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "angular_leaf_spot", "bean_rust", "healthy" ]
Danielabad/vit-model
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-model This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.0289 - Accuracy: 0.9925 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1368 | 3.85 | 500 | 0.0289 | 0.9925 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "angular_leaf_spot", "bean_rust", "healthy" ]
dyaminda/image_classification
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # image_classification This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 1.2727 - Accuracy: 0.5312 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 2.0804 | 1.0 | 10 | 2.0714 | 0.1625 | | 2.0428 | 2.0 | 20 | 2.0324 | 0.2313 | | 1.9463 | 3.0 | 30 | 1.8978 | 0.3438 | | 1.7768 | 4.0 | 40 | 1.7234 | 0.375 | | 1.6163 | 5.0 | 50 | 1.6029 | 0.4188 | | 1.509 | 6.0 | 60 | 1.5122 | 0.5 | | 1.4118 | 7.0 | 70 | 1.4839 | 0.4375 | | 1.3381 | 8.0 | 80 | 1.4268 | 0.475 | | 1.2653 | 9.0 | 90 | 1.4095 | 0.4813 | | 1.1979 | 10.0 | 100 | 1.3504 | 0.5375 | | 1.1219 | 11.0 | 110 | 1.3293 | 0.4875 | | 1.0858 | 12.0 | 120 | 1.3023 | 0.4875 | | 1.0214 | 13.0 | 130 | 1.3063 | 0.5188 | | 1.0085 | 14.0 | 140 | 1.3306 | 0.5312 | | 0.9615 | 15.0 | 150 | 1.2838 | 0.5 | | 0.9277 | 16.0 | 160 | 1.3073 | 0.5125 | | 0.898 | 17.0 | 170 | 1.2606 | 0.5437 | | 0.8747 | 18.0 | 180 | 1.3116 | 0.5437 | | 0.8657 | 19.0 | 190 | 1.3171 | 0.5375 | | 0.8462 | 20.0 | 200 | 1.2619 | 0.525 | ### Framework versions - Transformers 4.33.2 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "anger", "contempt", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
Isaac18/practica_imc
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # practica_imc This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.0167 - Accuracy: 0.9925 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1401 | 3.85 | 500 | 0.0167 | 0.9925 | ### Framework versions - Transformers 4.33.2 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "angular_leaf_spot", "bean_rust", "healthy" ]
ALEXISLG/vit-model
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-model This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.0346 - Accuracy: 0.9925 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1534 | 3.85 | 500 | 0.0346 | 0.9925 | ### Framework versions - Transformers 4.33.2 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "angular_leaf_spot", "bean_rust", "healthy" ]
Arlethh/09arly
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 09arly This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.0557 - Accuracy: 0.9850 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1407 | 3.85 | 500 | 0.0557 | 0.9850 | ### Framework versions - Transformers 4.30.2 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "angular_leaf_spot", "bean_rust", "healthy" ]
IsraelRam/israRam
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # IsraRam This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.0927 - Accuracy: 0.9699 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1377 | 3.85 | 500 | 0.0927 | 0.9699 | ### Framework versions - Transformers 4.33.2 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "angular_leaf_spot", "bean_rust", "healthy" ]
IsraNva/isranva
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # isranva This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 0.0741 - Accuracy: 0.9850 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1607 | 3.85 | 500 | 0.0741 | 0.9850 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
[ "angular_leaf_spot", "bean_rust", "healthy" ]
DiegoFlowers/vit-model
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# vit-model

This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0448
- Accuracy: 0.9925

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0002
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 4

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.1328        | 3.85  | 500  | 0.0448          | 0.9925   |

### Framework versions

- Transformers 4.28.1
- Pytorch 2.0.1+cu118
- Datasets 2.14.5
- Tokenizers 0.13.3
[ "angular_leaf_spot", "bean_rust", "healthy" ]
TamalDeFrijol/IAFrijol
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# IAFrijol

This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0265
- Accuracy: 0.9925

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0002
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 4

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.142         | 3.85  | 500  | 0.0265          | 0.9925   |

### Framework versions

- Transformers 4.33.2
- Pytorch 2.0.1+cu118
- Datasets 2.14.5
- Tokenizers 0.13.3
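For reproducibility, the hyperparameter bullets above map almost one-to-one onto `TrainingArguments`. A minimal sketch follows; the `output_dir` is a placeholder, and Adam's betas/epsilon and the linear scheduler are already the library defaults:

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="./IAFrijol",      # hypothetical output path
    learning_rate=2e-4,           # 0.0002 as listed above
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    seed=42,
    lr_scheduler_type="linear",   # the default, shown for clarity
    num_train_epochs=4,
    # optimizer: Adam with betas=(0.9, 0.999), epsilon=1e-08 — the defaults
)
```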
[ "angular_leaf_spot", "bean_rust", "healthy" ]
Jofiel/BeansIA
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# BeansIA

This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0097
- Accuracy: 1.0

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0002
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 4

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.1346        | 3.85  | 500  | 0.0097          | 1.0      |

### Framework versions

- Transformers 4.33.1
- Pytorch 2.0.1+cu118
- Datasets 2.14.5
- Tokenizers 0.13.3
[ "angular_leaf_spot", "bean_rust", "healthy" ]
Saul98lm/prueba2
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# prueba2

This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the beans dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0071
- Accuracy: 1.0

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0002
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 4

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.1508        | 3.85  | 500  | 0.0071          | 1.0      |

### Framework versions

- Transformers 4.33.2
- Pytorch 2.0.1+cu118
- Datasets 2.14.5
- Tokenizers 0.13.3
[ "angular_leaf_spot", "bean_rust", "healthy" ]
dini-r-a/emotion_classification
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# emotion_classification

This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset.
It achieves the following results on the evaluation set:
- Loss: 1.6256
- Accuracy: 0.5625

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.00025
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 15

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| No log        | 1.0   | 10   | 1.7794          | 0.4875   |
| No log        | 2.0   | 20   | 1.6813          | 0.4938   |
| 0.2276        | 3.0   | 30   | 1.7602          | 0.4875   |
| 0.2276        | 4.0   | 40   | 1.9172          | 0.4562   |
| 0.2048        | 5.0   | 50   | 1.9316          | 0.4625   |
| 0.2048        | 6.0   | 60   | 1.8285          | 0.5      |
| 0.2048        | 7.0   | 70   | 1.6341          | 0.5687   |
| 0.1617        | 8.0   | 80   | 1.7461          | 0.5375   |
| 0.1617        | 9.0   | 90   | 1.6544          | 0.5312   |
| 0.1766        | 10.0  | 100  | 1.9449          | 0.4875   |
| 0.1766        | 11.0  | 110  | 1.7565          | 0.5125   |
| 0.1766        | 12.0  | 120  | 1.8936          | 0.5      |
| 0.1979        | 13.0  | 130  | 1.6812          | 0.5687   |
| 0.1979        | 14.0  | 140  | 1.7619          | 0.5188   |
| 0.1694        | 15.0  | 150  | 1.6903          | 0.55     |

### Framework versions

- Transformers 4.33.1
- Pytorch 1.12.1+cu116
- Datasets 2.4.0
- Tokenizers 0.12.1
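A minimal inference sketch for this emotion classifier (the local file `face.jpg` is a hypothetical input; softmax turns the raw logits into one probability per emotion label):

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

processor = AutoImageProcessor.from_pretrained("dini-r-a/emotion_classification")
model = AutoModelForImageClassification.from_pretrained("dini-r-a/emotion_classification")

image = Image.open("face.jpg")  # hypothetical face photo
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# One probability per label: anger, contempt, disgust, fear, happy,
# neutral, sad, surprise.
probs = logits.softmax(dim=-1)[0]
for idx, p in enumerate(probs):
    print(f"{model.config.id2label[idx]}: {p:.3f}")
```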
[ "anger", "contempt", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
chansuga/swin-tiny-patch4-window7-224-finetuned-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# swin-tiny-patch4-window7-224-finetuned-eurosat

This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0628
- Accuracy: 0.9778

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 128
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.2799        | 1.0   | 190  | 0.1301          | 0.9574   |
| 0.1848        | 2.0   | 380  | 0.0803          | 0.9711   |
| 0.1504        | 3.0   | 570  | 0.0628          | 0.9778   |

### Framework versions

- Transformers 4.17.0
- Pytorch 2.0.1+cu118
- Datasets 2.14.5
- Tokenizers 0.14.0
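Note that `total_train_batch_size: 128` is derived rather than set directly: per-device batch 32 × 4 gradient-accumulation steps = 128. A sketch of the corresponding `TrainingArguments` (with a placeholder `output_dir`):

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="./swin-tiny-finetuned-eurosat",  # hypothetical output path
    learning_rate=5e-5,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=32,
    gradient_accumulation_steps=4,  # 32 * 4 = 128 effective train batch
    seed=42,
    lr_scheduler_type="linear",
    warmup_ratio=0.1,               # 10% of total steps used for warmup
    num_train_epochs=3,
)
```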
[ "annualcrop", "forest", "herbaceousvegetation", "highway", "industrial", "pasture", "permanentcrop", "residential", "river", "sealake" ]
yfh/image_classification
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# image_classification

This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset.
It achieves the following results on the evaluation set:
- eval_loss: 1.8405
- eval_accuracy: 0.5563
- eval_runtime: 2.1774
- eval_samples_per_second: 73.483
- eval_steps_per_second: 4.593
- epoch: 43.75
- step: 1750

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 100

### Framework versions

- Transformers 4.33.1
- Pytorch 2.0.1+cu118
- Datasets 2.14.5
- Tokenizers 0.13.3
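The checkpoint metadata above lets one roughly reverse-engineer the dataset size; a back-of-the-envelope sketch (ignoring any partial final batch, so the figure is approximate):

```python
# From the card: 1750 optimizer steps at epoch 43.75, train batch size 16.
steps, epoch, batch_size = 1750, 43.75, 16

steps_per_epoch = steps / epoch                      # 40.0 steps per epoch
approx_train_images = steps_per_epoch * batch_size   # ~640 training images
print(steps_per_epoch, approx_train_images)
```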
[ "anger", "contempt", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
agustin228/image_classification
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# image_classification

This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the pokemon-classification dataset.
It achieves the following results on the evaluation set:
- Loss: 0.8072
- Accuracy: 0.8854

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| No log        | 1.0   | 240  | 2.0511          | 0.7427   |
| No log        | 2.0   | 480  | 0.9657          | 0.8792   |
| 2.3005        | 3.0   | 720  | 0.8118          | 0.8833   |

### Framework versions

- Transformers 4.33.3
- Pytorch 2.0.1+cu118
- Datasets 2.14.5
- Tokenizers 0.13.3
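With roughly 150 Pokémon classes, the top few predictions are usually more informative than a single argmax. A minimal sketch (`pikachu.png` is a hypothetical input path):

```python
from transformers import pipeline

classifier = pipeline("image-classification", model="agustin228/image_classification")

# top_k=5 returns the five highest-scoring of the ~150 Pokémon labels.
for pred in classifier("pikachu.png", top_k=5):
    print(f"{pred['label']}: {pred['score']:.3f}")
```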
[ "golbat", "machoke", "raichu", "dragonite", "fearow", "slowpoke", "weezing", "beedrill", "weedle", "cloyster", "vaporeon", "gyarados", "golduck", "zapdos", "machamp", "hitmonlee", "primeape", "cubone", "sandslash", "scyther", "haunter", "metapod", "tentacruel", "aerodactyl", "raticate", "kabutops", "ninetales", "zubat", "rhydon", "mew", "pinsir", "ditto", "victreebel", "omanyte", "horsea", "magnemite", "pikachu", "blastoise", "venomoth", "charizard", "seadra", "muk", "spearow", "bulbasaur", "bellsprout", "electrode", "ivysaur", "gloom", "poliwhirl", "flareon", "seaking", "hypno", "wartortle", "mankey", "tentacool", "exeggcute", "meowth", "growlithe", "tangela", "drowzee", "rapidash", "venonat", "omastar", "pidgeot", "nidorino", "porygon", "lickitung", "rattata", "machop", "charmeleon", "slowbro", "parasect", "eevee", "diglett", "starmie", "staryu", "psyduck", "dragonair", "magikarp", "vileplume", "marowak", "pidgeotto", "shellder", "mewtwo", "lapras", "farfetchd", "kingler", "seel", "kakuna", "doduo", "electabuzz", "charmander", "rhyhorn", "tauros", "dugtrio", "kabuto", "poliwrath", "gengar", "exeggutor", "dewgong", "jigglypuff", "geodude", "kadabra", "nidorina", "sandshrew", "grimer", "persian", "mrmime", "pidgey", "koffing", "ekans", "alolan sandslash", "venusaur", "snorlax", "paras", "jynx", "chansey", "weepinbell", "hitmonchan", "gastly", "kangaskhan", "oddish", "wigglytuff", "graveler", "arcanine", "clefairy", "articuno", "poliwag", "golem", "abra", "squirtle", "voltorb", "ponyta", "moltres", "nidoqueen", "magmar", "onix", "vulpix", "butterfree", "dodrio", "krabby", "arbok", "clefable", "goldeen", "magneton", "dratini", "caterpie", "jolteon", "nidoking", "alakazam" ]
NabeelShar/emotions_classifier
<!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. -->

# NabeelShar/emotions_classifier

This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on an unknown dataset.
It achieves the following results on the evaluation set:
- Train Loss: 0.2082
- Validation Loss: 2.3230
- Train Accuracy: 0.425
- Epoch: 0

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'module': 'keras.optimizers.schedules', 'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 0.0003, 'decay_steps': 3200, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, 'registered_name': None}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.1}
- training_precision: float32

### Training results

| Train Loss | Validation Loss | Train Accuracy | Epoch |
|:----------:|:---------------:|:--------------:|:-----:|
| 0.2082     | 2.3230          | 0.425          | 0     |

### Framework versions

- Transformers 4.33.1
- TensorFlow 2.13.0
- Datasets 2.14.5
- Tokenizers 0.13.3
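The optimizer blob above is the serialized form of `AdamWeightDecay` with a linear (power = 1.0) polynomial decay from 3e-4 to 0 over 3200 steps and a weight-decay rate of 0.1. A sketch of rebuilding it with the `transformers.create_optimizer` helper (assumes TensorFlow is installed; no warmup, matching the schedule above):

```python
from transformers import create_optimizer

# Returns the AdamWeightDecay optimizer and its learning-rate schedule.
optimizer, lr_schedule = create_optimizer(
    init_lr=3e-4,          # initial_learning_rate
    num_train_steps=3200,  # decay_steps
    num_warmup_steps=0,    # decay linearly from step 0
    weight_decay_rate=0.1,
)
```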
[ "anger", "contempt", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
anggtpd/emotion_recognition
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# emotion_recognition

This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset.
It achieves the following results on the evaluation set:
- Loss: 1.6139
- Accuracy: 0.4562

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 128
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 10

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| No log        | 1.0   | 5    | 1.9416          | 0.3438   |
| 1.8445        | 2.0   | 10   | 1.8517          | 0.3937   |
| 1.8445        | 3.0   | 15   | 1.7436          | 0.3875   |
| 1.6748        | 4.0   | 20   | 1.6654          | 0.475    |
| 1.6748        | 5.0   | 25   | 1.6098          | 0.5062   |
| 1.5405        | 6.0   | 30   | 1.5734          | 0.4875   |
| 1.5405        | 7.0   | 35   | 1.5446          | 0.4938   |
| 1.4603        | 8.0   | 40   | 1.5415          | 0.4938   |
| 1.4603        | 9.0   | 45   | 1.5173          | 0.5062   |
| 1.4154        | 10.0  | 50   | 1.4983          | 0.5062   |

### Framework versions

- Transformers 4.33.2
- Pytorch 2.0.1+cu118
- Datasets 2.14.5
- Tokenizers 0.13.3
[ "anger", "contempt", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
hilmansw/emotion_classification
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# emotion_classification

This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset.
It achieves the following results on the evaluation set:
- Loss: 1.4084
- Accuracy: 0.45

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| No log        | 1.0   | 40   | 1.8332          | 0.3375   |
| No log        | 2.0   | 80   | 1.5977          | 0.3438   |
| No log        | 3.0   | 120  | 1.4988          | 0.45     |
| No log        | 4.0   | 160  | 1.4639          | 0.4437   |
| No log        | 5.0   | 200  | 1.4292          | 0.4188   |
| No log        | 6.0   | 240  | 1.4092          | 0.4625   |
| No log        | 7.0   | 280  | 1.3667          | 0.45     |
| No log        | 8.0   | 320  | 1.3967          | 0.4313   |
| No log        | 9.0   | 360  | 1.3820          | 0.5062   |
| No log        | 10.0  | 400  | 1.3740          | 0.4938   |

### Framework versions

- Transformers 4.33.1
- Pytorch 2.0.1+cu118
- Datasets 2.14.5
- Tokenizers 0.13.3
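Several of these cards train on an `imagefolder` dataset, i.e. images organised into one sub-directory per label. A minimal loading sketch (`emotions/` is a hypothetical local directory):

```python
from datasets import load_dataset

# Builds train/validation splits from an image directory tree.
dataset = load_dataset("imagefolder", data_dir="emotions")

# Label names are inferred from the sub-directory names, e.g.
# ['anger', 'contempt', 'disgust', 'fear', 'happy', 'neutral', 'sad', 'surprise']
print(dataset["train"].features["label"].names)
```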
[ "anger", "contempt", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
Mangara01/image_classification
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# image_classification

This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset.
It achieves the following results on the evaluation set:
- Loss: 1.7520
- Accuracy: 0.2875

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| No log        | 1.0   | 40   | 2.0583          | 0.1688   |
| No log        | 2.0   | 80   | 2.0009          | 0.2562   |
| No log        | 3.0   | 120  | 1.9418          | 0.2625   |
| No log        | 4.0   | 160  | 1.8934          | 0.275    |
| No log        | 5.0   | 200  | 1.8221          | 0.2812   |
| No log        | 6.0   | 240  | 1.7694          | 0.3      |
| No log        | 7.0   | 280  | 1.7509          | 0.3063   |
| No log        | 8.0   | 320  | 1.7311          | 0.2562   |
| No log        | 9.0   | 360  | 1.7143          | 0.3      |
| No log        | 10.0  | 400  | 1.7058          | 0.3      |

### Framework versions

- Transformers 4.28.0
- Pytorch 2.0.1+cu118
- Datasets 2.14.5
- Tokenizers 0.13.3
[ "anger", "contempt", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
facebook/dinov2-small-imagenet1k-1-layer
# Vision Transformer (small-sized model) trained using DINOv2

Vision Transformer (ViT) model trained using the DINOv2 method. It was introduced in the paper [DINOv2: Learning Robust Visual Features without Supervision](https://arxiv.org/abs/2304.07193) by Oquab et al. and first released in [this repository](https://github.com/facebookresearch/dinov2).

Disclaimer: The team releasing DINOv2 did not write a model card for this model so this model card has been written by the Hugging Face team.

## Model description

The Vision Transformer (ViT) is a transformer encoder model (BERT-like) pretrained on a large collection of images in a self-supervised fashion. Images are presented to the model as a sequence of fixed-size patches, which are linearly embedded. A [CLS] token is added to the beginning of the sequence for use in classification tasks, and absolute position embeddings are added before feeding the sequence to the layers of the Transformer encoder. Note that, unlike the plain DINOv2 backbones, this checkpoint does include a classification head: a single linear layer trained on ImageNet-1k (the "1-layer" in the name).

Through pre-training, the model learns an inner representation of images that can then be used to extract features useful for downstream tasks: if you have a dataset of labeled images, for instance, you can train a standard classifier by placing a linear layer on top of the pre-trained encoder. One typically places a linear layer on top of the [CLS] token, as the last hidden state of this token can be seen as a representation of the entire image.

## Intended uses & limitations

You can use the model for classifying an image among one of the [1000 ImageNet labels](https://huggingface.co/datasets/huggingface/label-files/blob/main/imagenet-1k-id2label.json). See the [model hub](https://huggingface.co/models?search=facebook/dinov2) to look for other fine-tuned versions on a task that interests you.

### How to use

Here is how to use this model:

```python
from transformers import AutoImageProcessor, AutoModelForImageClassification
from PIL import Image
import requests

url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained('facebook/dinov2-small-imagenet1k-1-layer')
model = AutoModelForImageClassification.from_pretrained('facebook/dinov2-small-imagenet1k-1-layer')

inputs = processor(images=image, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits

predicted_class_idx = logits.argmax(-1).item()
print("Predicted class:", model.config.id2label[predicted_class_idx])
```

### BibTeX entry and citation info

```bibtex
@misc{oquab2023dinov2,
      title={DINOv2: Learning Robust Visual Features without Supervision},
      author={Maxime Oquab and Timothée Darcet and Théo Moutakanni and Huy Vo and Marc Szafraniec and Vasil Khalidov and Pierre Fernandez and Daniel Haziza and Francisco Massa and Alaaeldin El-Nouby and Mahmoud Assran and Nicolas Ballas and Wojciech Galuba and Russell Howes and Po-Yao Huang and Shang-Wen Li and Ishan Misra and Michael Rabbat and Vasu Sharma and Gabriel Synnaeve and Hu Xu and Hervé Jegou and Julien Mairal and Patrick Labatut and Armand Joulin and Piotr Bojanowski},
      year={2023},
      eprint={2304.07193},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
```
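As a convenience alternative to the explicit processor/model calls above, the same checkpoint can also be driven through the `pipeline` API; a minimal sketch using the same COCO example image:

```python
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="facebook/dinov2-small-imagenet1k-1-layer",
)

# The pipeline downloads the image, preprocesses it, and returns the
# top-scoring ImageNet labels with their probabilities.
print(classifier("http://images.cocodataset.org/val2017/000000039769.jpg"))
```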
[ "tench, tinca tinca", "goldfish, carassius auratus", "great white shark, white shark, man-eater, man-eating shark, carcharodon carcharias", "tiger shark, galeocerdo cuvieri", "hammerhead, hammerhead shark", "electric ray, crampfish, numbfish, torpedo", "stingray", "cock", "hen", "ostrich, struthio camelus", "brambling, fringilla montifringilla", "goldfinch, carduelis carduelis", "house finch, linnet, carpodacus mexicanus", "junco, snowbird", "indigo bunting, indigo finch, indigo bird, passerina cyanea", "robin, american robin, turdus migratorius", "bulbul", "jay", "magpie", "chickadee", "water ouzel, dipper", "kite", "bald eagle, american eagle, haliaeetus leucocephalus", "vulture", "great grey owl, great gray owl, strix nebulosa", "european fire salamander, salamandra salamandra", "common newt, triturus vulgaris", "eft", "spotted salamander, ambystoma maculatum", "axolotl, mud puppy, ambystoma mexicanum", "bullfrog, rana catesbeiana", "tree frog, tree-frog", "tailed frog, bell toad, ribbed toad, tailed toad, ascaphus trui", "loggerhead, loggerhead turtle, caretta caretta", "leatherback turtle, leatherback, leathery turtle, dermochelys coriacea", "mud turtle", "terrapin", "box turtle, box tortoise", "banded gecko", "common iguana, iguana, iguana iguana", "american chameleon, anole, anolis carolinensis", "whiptail, whiptail lizard", "agama", "frilled lizard, chlamydosaurus kingi", "alligator lizard", "gila monster, heloderma suspectum", "green lizard, lacerta viridis", "african chameleon, chamaeleo chamaeleon", "komodo dragon, komodo lizard, dragon lizard, giant lizard, varanus komodoensis", "african crocodile, nile crocodile, crocodylus niloticus", "american alligator, alligator mississipiensis", "triceratops", "thunder snake, worm snake, carphophis amoenus", "ringneck snake, ring-necked snake, ring snake", "hognose snake, puff adder, sand viper", "green snake, grass snake", "king snake, kingsnake", "garter snake, grass snake", "water snake", "vine snake", "night snake, hypsiglena torquata", "boa constrictor, constrictor constrictor", "rock python, rock snake, python sebae", "indian cobra, naja naja", "green mamba", "sea snake", "horned viper, cerastes, sand viper, horned asp, cerastes cornutus", "diamondback, diamondback rattlesnake, crotalus adamanteus", "sidewinder, horned rattlesnake, crotalus cerastes", "trilobite", "harvestman, daddy longlegs, phalangium opilio", "scorpion", "black and gold garden spider, argiope aurantia", "barn spider, araneus cavaticus", "garden spider, aranea diademata", "black widow, latrodectus mactans", "tarantula", "wolf spider, hunting spider", "tick", "centipede", "black grouse", "ptarmigan", "ruffed grouse, partridge, bonasa umbellus", "prairie chicken, prairie grouse, prairie fowl", "peacock", "quail", "partridge", "african grey, african gray, psittacus erithacus", "macaw", "sulphur-crested cockatoo, kakatoe galerita, cacatua galerita", "lorikeet", "coucal", "bee eater", "hornbill", "hummingbird", "jacamar", "toucan", "drake", "red-breasted merganser, mergus serrator", "goose", "black swan, cygnus atratus", "tusker", "echidna, spiny anteater, anteater", "platypus, duckbill, duckbilled platypus, duck-billed platypus, ornithorhynchus anatinus", "wallaby, brush kangaroo", "koala, koala bear, kangaroo bear, native bear, phascolarctos cinereus", "wombat", "jellyfish", "sea anemone, anemone", "brain coral", "flatworm, platyhelminth", "nematode, nematode worm, roundworm", "conch", "snail", "slug", "sea slug, nudibranch", "chiton, coat-of-mail shell, sea 
cradle, polyplacophore", "chambered nautilus, pearly nautilus, nautilus", "dungeness crab, cancer magister", "rock crab, cancer irroratus", "fiddler crab", "king crab, alaska crab, alaskan king crab, alaska king crab, paralithodes camtschatica", "american lobster, northern lobster, maine lobster, homarus americanus", "spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish", "crayfish, crawfish, crawdad, crawdaddy", "hermit crab", "isopod", "white stork, ciconia ciconia", "black stork, ciconia nigra", "spoonbill", "flamingo", "little blue heron, egretta caerulea", "american egret, great white heron, egretta albus", "bittern", "crane", "limpkin, aramus pictus", "european gallinule, porphyrio porphyrio", "american coot, marsh hen, mud hen, water hen, fulica americana", "bustard", "ruddy turnstone, arenaria interpres", "red-backed sandpiper, dunlin, erolia alpina", "redshank, tringa totanus", "dowitcher", "oystercatcher, oyster catcher", "pelican", "king penguin, aptenodytes patagonica", "albatross, mollymawk", "grey whale, gray whale, devilfish, eschrichtius gibbosus, eschrichtius robustus", "killer whale, killer, orca, grampus, sea wolf, orcinus orca", "dugong, dugong dugon", "sea lion", "chihuahua", "japanese spaniel", "maltese dog, maltese terrier, maltese", "pekinese, pekingese, peke", "shih-tzu", "blenheim spaniel", "papillon", "toy terrier", "rhodesian ridgeback", "afghan hound, afghan", "basset, basset hound", "beagle", "bloodhound, sleuthhound", "bluetick", "black-and-tan coonhound", "walker hound, walker foxhound", "english foxhound", "redbone", "borzoi, russian wolfhound", "irish wolfhound", "italian greyhound", "whippet", "ibizan hound, ibizan podenco", "norwegian elkhound, elkhound", "otterhound, otter hound", "saluki, gazelle hound", "scottish deerhound, deerhound", "weimaraner", "staffordshire bullterrier, staffordshire bull terrier", "american staffordshire terrier, staffordshire terrier, american pit bull terrier, pit bull terrier", "bedlington terrier", "border terrier", "kerry blue terrier", "irish terrier", "norfolk terrier", "norwich terrier", "yorkshire terrier", "wire-haired fox terrier", "lakeland terrier", "sealyham terrier, sealyham", "airedale, airedale terrier", "cairn, cairn terrier", "australian terrier", "dandie dinmont, dandie dinmont terrier", "boston bull, boston terrier", "miniature schnauzer", "giant schnauzer", "standard schnauzer", "scotch terrier, scottish terrier, scottie", "tibetan terrier, chrysanthemum dog", "silky terrier, sydney silky", "soft-coated wheaten terrier", "west highland white terrier", "lhasa, lhasa apso", "flat-coated retriever", "curly-coated retriever", "golden retriever", "labrador retriever", "chesapeake bay retriever", "german short-haired pointer", "vizsla, hungarian pointer", "english setter", "irish setter, red setter", "gordon setter", "brittany spaniel", "clumber, clumber spaniel", "english springer, english springer spaniel", "welsh springer spaniel", "cocker spaniel, english cocker spaniel, cocker", "sussex spaniel", "irish water spaniel", "kuvasz", "schipperke", "groenendael", "malinois", "briard", "kelpie", "komondor", "old english sheepdog, bobtail", "shetland sheepdog, shetland sheep dog, shetland", "collie", "border collie", "bouvier des flandres, bouviers des flandres", "rottweiler", "german shepherd, german shepherd dog, german police dog, alsatian", "doberman, doberman pinscher", "miniature pinscher", "greater swiss mountain dog", "bernese mountain dog", "appenzeller", "entlebucher", "boxer", "bull 
mastiff", "tibetan mastiff", "french bulldog", "great dane", "saint bernard, st bernard", "eskimo dog, husky", "malamute, malemute, alaskan malamute", "siberian husky", "dalmatian, coach dog, carriage dog", "affenpinscher, monkey pinscher, monkey dog", "basenji", "pug, pug-dog", "leonberg", "newfoundland, newfoundland dog", "great pyrenees", "samoyed, samoyede", "pomeranian", "chow, chow chow", "keeshond", "brabancon griffon", "pembroke, pembroke welsh corgi", "cardigan, cardigan welsh corgi", "toy poodle", "miniature poodle", "standard poodle", "mexican hairless", "timber wolf, grey wolf, gray wolf, canis lupus", "white wolf, arctic wolf, canis lupus tundrarum", "red wolf, maned wolf, canis rufus, canis niger", "coyote, prairie wolf, brush wolf, canis latrans", "dingo, warrigal, warragal, canis dingo", "dhole, cuon alpinus", "african hunting dog, hyena dog, cape hunting dog, lycaon pictus", "hyena, hyaena", "red fox, vulpes vulpes", "kit fox, vulpes macrotis", "arctic fox, white fox, alopex lagopus", "grey fox, gray fox, urocyon cinereoargenteus", "tabby, tabby cat", "tiger cat", "persian cat", "siamese cat, siamese", "egyptian cat", "cougar, puma, catamount, mountain lion, painter, panther, felis concolor", "lynx, catamount", "leopard, panthera pardus", "snow leopard, ounce, panthera uncia", "jaguar, panther, panthera onca, felis onca", "lion, king of beasts, panthera leo", "tiger, panthera tigris", "cheetah, chetah, acinonyx jubatus", "brown bear, bruin, ursus arctos", "american black bear, black bear, ursus americanus, euarctos americanus", "ice bear, polar bear, ursus maritimus, thalarctos maritimus", "sloth bear, melursus ursinus, ursus ursinus", "mongoose", "meerkat, mierkat", "tiger beetle", "ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle", "ground beetle, carabid beetle", "long-horned beetle, longicorn, longicorn beetle", "leaf beetle, chrysomelid", "dung beetle", "rhinoceros beetle", "weevil", "fly", "bee", "ant, emmet, pismire", "grasshopper, hopper", "cricket", "walking stick, walkingstick, stick insect", "cockroach, roach", "mantis, mantid", "cicada, cicala", "leafhopper", "lacewing, lacewing fly", "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk", "damselfly", "admiral", "ringlet, ringlet butterfly", "monarch, monarch butterfly, milkweed butterfly, danaus plexippus", "cabbage butterfly", "sulphur butterfly, sulfur butterfly", "lycaenid, lycaenid butterfly", "starfish, sea star", "sea urchin", "sea cucumber, holothurian", "wood rabbit, cottontail, cottontail rabbit", "hare", "angora, angora rabbit", "hamster", "porcupine, hedgehog", "fox squirrel, eastern fox squirrel, sciurus niger", "marmot", "beaver", "guinea pig, cavia cobaya", "sorrel", "zebra", "hog, pig, grunter, squealer, sus scrofa", "wild boar, boar, sus scrofa", "warthog", "hippopotamus, hippo, river horse, hippopotamus amphibius", "ox", "water buffalo, water ox, asiatic buffalo, bubalus bubalis", "bison", "ram, tup", "bighorn, bighorn sheep, cimarron, rocky mountain bighorn, rocky mountain sheep, ovis canadensis", "ibex, capra ibex", "hartebeest", "impala, aepyceros melampus", "gazelle", "arabian camel, dromedary, camelus dromedarius", "llama", "weasel", "mink", "polecat, fitch, foulmart, foumart, mustela putorius", "black-footed ferret, ferret, mustela nigripes", "otter", "skunk, polecat, wood pussy", "badger", "armadillo", "three-toed sloth, ai, bradypus tridactylus", "orangutan, orang, orangutang, pongo pygmaeus", "gorilla, 
gorilla gorilla", "chimpanzee, chimp, pan troglodytes", "gibbon, hylobates lar", "siamang, hylobates syndactylus, symphalangus syndactylus", "guenon, guenon monkey", "patas, hussar monkey, erythrocebus patas", "baboon", "macaque", "langur", "colobus, colobus monkey", "proboscis monkey, nasalis larvatus", "marmoset", "capuchin, ringtail, cebus capucinus", "howler monkey, howler", "titi, titi monkey", "spider monkey, ateles geoffroyi", "squirrel monkey, saimiri sciureus", "madagascar cat, ring-tailed lemur, lemur catta", "indri, indris, indri indri, indri brevicaudatus", "indian elephant, elephas maximus", "african elephant, loxodonta africana", "lesser panda, red panda, panda, bear cat, cat bear, ailurus fulgens", "giant panda, panda, panda bear, coon bear, ailuropoda melanoleuca", "barracouta, snoek", "eel", "coho, cohoe, coho salmon, blue jack, silver salmon, oncorhynchus kisutch", "rock beauty, holocanthus tricolor", "anemone fish", "sturgeon", "gar, garfish, garpike, billfish, lepisosteus osseus", "lionfish", "puffer, pufferfish, blowfish, globefish", "abacus", "abaya", "academic gown, academic robe, judge's robe", "accordion, piano accordion, squeeze box", "acoustic guitar", "aircraft carrier, carrier, flattop, attack aircraft carrier", "airliner", "airship, dirigible", "altar", "ambulance", "amphibian, amphibious vehicle", "analog clock", "apiary, bee house", "apron", "ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin", "assault rifle, assault gun", "backpack, back pack, knapsack, packsack, rucksack, haversack", "bakery, bakeshop, bakehouse", "balance beam, beam", "balloon", "ballpoint, ballpoint pen, ballpen, biro", "band aid", "banjo", "bannister, banister, balustrade, balusters, handrail", "barbell", "barber chair", "barbershop", "barn", "barometer", "barrel, cask", "barrow, garden cart, lawn cart, wheelbarrow", "baseball", "basketball", "bassinet", "bassoon", "bathing cap, swimming cap", "bath towel", "bathtub, bathing tub, bath, tub", "beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon", "beacon, lighthouse, beacon light, pharos", "beaker", "bearskin, busby, shako", "beer bottle", "beer glass", "bell cote, bell cot", "bib", "bicycle-built-for-two, tandem bicycle, tandem", "bikini, two-piece", "binder, ring-binder", "binoculars, field glasses, opera glasses", "birdhouse", "boathouse", "bobsled, bobsleigh, bob", "bolo tie, bolo, bola tie, bola", "bonnet, poke bonnet", "bookcase", "bookshop, bookstore, bookstall", "bottlecap", "bow", "bow tie, bow-tie, bowtie", "brass, memorial tablet, plaque", "brassiere, bra, bandeau", "breakwater, groin, groyne, mole, bulwark, seawall, jetty", "breastplate, aegis, egis", "broom", "bucket, pail", "buckle", "bulletproof vest", "bullet train, bullet", "butcher shop, meat market", "cab, hack, taxi, taxicab", "caldron, cauldron", "candle, taper, wax light", "cannon", "canoe", "can opener, tin opener", "cardigan", "car mirror", "carousel, carrousel, merry-go-round, roundabout, whirligig", "carpenter's kit, tool kit", "carton", "car wheel", "cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, atm", "cassette", "cassette player", "castle", "catamaran", "cd player", "cello, violoncello", "cellular telephone, cellular phone, cellphone, cell, mobile phone", "chain", "chainlink fence", "chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour", "chain saw, chainsaw", "chest", "chiffonier, 
commode", "chime, bell, gong", "china cabinet, china closet", "christmas stocking", "church, church building", "cinema, movie theater, movie theatre, movie house, picture palace", "cleaver, meat cleaver, chopper", "cliff dwelling", "cloak", "clog, geta, patten, sabot", "cocktail shaker", "coffee mug", "coffeepot", "coil, spiral, volute, whorl, helix", "combination lock", "computer keyboard, keypad", "confectionery, confectionary, candy store", "container ship, containership, container vessel", "convertible", "corkscrew, bottle screw", "cornet, horn, trumpet, trump", "cowboy boot", "cowboy hat, ten-gallon hat", "cradle", "crane", "crash helmet", "crate", "crib, cot", "crock pot", "croquet ball", "crutch", "cuirass", "dam, dike, dyke", "desk", "desktop computer", "dial telephone, dial phone", "diaper, nappy, napkin", "digital clock", "digital watch", "dining table, board", "dishrag, dishcloth", "dishwasher, dish washer, dishwashing machine", "disk brake, disc brake", "dock, dockage, docking facility", "dogsled, dog sled, dog sleigh", "dome", "doormat, welcome mat", "drilling platform, offshore rig", "drum, membranophone, tympan", "drumstick", "dumbbell", "dutch oven", "electric fan, blower", "electric guitar", "electric locomotive", "entertainment center", "envelope", "espresso maker", "face powder", "feather boa, boa", "file, file cabinet, filing cabinet", "fireboat", "fire engine, fire truck", "fire screen, fireguard", "flagpole, flagstaff", "flute, transverse flute", "folding chair", "football helmet", "forklift", "fountain", "fountain pen", "four-poster", "freight car", "french horn, horn", "frying pan, frypan, skillet", "fur coat", "garbage truck, dustcart", "gasmask, respirator, gas helmet", "gas pump, gasoline pump, petrol pump, island dispenser", "goblet", "go-kart", "golf ball", "golfcart, golf cart", "gondola", "gong, tam-tam", "gown", "grand piano, grand", "greenhouse, nursery, glasshouse", "grille, radiator grille", "grocery store, grocery, food market, market", "guillotine", "hair slide", "hair spray", "half track", "hammer", "hamper", "hand blower, blow dryer, blow drier, hair dryer, hair drier", "hand-held computer, hand-held microcomputer", "handkerchief, hankie, hanky, hankey", "hard disc, hard disk, fixed disk", "harmonica, mouth organ, harp, mouth harp", "harp", "harvester, reaper", "hatchet", "holster", "home theater, home theatre", "honeycomb", "hook, claw", "hoopskirt, crinoline", "horizontal bar, high bar", "horse cart, horse-cart", "hourglass", "ipod", "iron, smoothing iron", "jack-o'-lantern", "jean, blue jean, denim", "jeep, landrover", "jersey, t-shirt, tee shirt", "jigsaw puzzle", "jinrikisha, ricksha, rickshaw", "joystick", "kimono", "knee pad", "knot", "lab coat, laboratory coat", "ladle", "lampshade, lamp shade", "laptop, laptop computer", "lawn mower, mower", "lens cap, lens cover", "letter opener, paper knife, paperknife", "library", "lifeboat", "lighter, light, igniter, ignitor", "limousine, limo", "liner, ocean liner", "lipstick, lip rouge", "loafer", "lotion", "loudspeaker, speaker, speaker unit, loudspeaker system, speaker system", "loupe, jeweler's loupe", "lumbermill, sawmill", "magnetic compass", "mailbag, postbag", "mailbox, letter box", "maillot", "maillot, tank suit", "manhole cover", "maraca", "marimba, xylophone", "mask", "matchstick", "maypole", "maze, labyrinth", "measuring cup", "medicine chest, medicine cabinet", "megalith, megalithic structure", "microphone, mike", "microwave, microwave oven", "military uniform", "milk can", "minibus", 
"miniskirt, mini", "minivan", "missile", "mitten", "mixing bowl", "mobile home, manufactured home", "model t", "modem", "monastery", "monitor", "moped", "mortar", "mortarboard", "mosque", "mosquito net", "motor scooter, scooter", "mountain bike, all-terrain bike, off-roader", "mountain tent", "mouse, computer mouse", "mousetrap", "moving van", "muzzle", "nail", "neck brace", "necklace", "nipple", "notebook, notebook computer", "obelisk", "oboe, hautboy, hautbois", "ocarina, sweet potato", "odometer, hodometer, mileometer, milometer", "oil filter", "organ, pipe organ", "oscilloscope, scope, cathode-ray oscilloscope, cro", "overskirt", "oxcart", "oxygen mask", "packet", "paddle, boat paddle", "paddlewheel, paddle wheel", "padlock", "paintbrush", "pajama, pyjama, pj's, jammies", "palace", "panpipe, pandean pipe, syrinx", "paper towel", "parachute, chute", "parallel bars, bars", "park bench", "parking meter", "passenger car, coach, carriage", "patio, terrace", "pay-phone, pay-station", "pedestal, plinth, footstall", "pencil box, pencil case", "pencil sharpener", "perfume, essence", "petri dish", "photocopier", "pick, plectrum, plectron", "pickelhaube", "picket fence, paling", "pickup, pickup truck", "pier", "piggy bank, penny bank", "pill bottle", "pillow", "ping-pong ball", "pinwheel", "pirate, pirate ship", "pitcher, ewer", "plane, carpenter's plane, woodworking plane", "planetarium", "plastic bag", "plate rack", "plow, plough", "plunger, plumber's helper", "polaroid camera, polaroid land camera", "pole", "police van, police wagon, paddy wagon, patrol wagon, wagon, black maria", "poncho", "pool table, billiard table, snooker table", "pop bottle, soda bottle", "pot, flowerpot", "potter's wheel", "power drill", "prayer rug, prayer mat", "printer", "prison, prison house", "projectile, missile", "projector", "puck, hockey puck", "punching bag, punch bag, punching ball, punchball", "purse", "quill, quill pen", "quilt, comforter, comfort, puff", "racer, race car, racing car", "racket, racquet", "radiator", "radio, wireless", "radio telescope, radio reflector", "rain barrel", "recreational vehicle, rv, r.v.", "reel", "reflex camera", "refrigerator, icebox", "remote control, remote", "restaurant, eating house, eating place, eatery", "revolver, six-gun, six-shooter", "rifle", "rocking chair, rocker", "rotisserie", "rubber eraser, rubber, pencil eraser", "rugby ball", "rule, ruler", "running shoe", "safe", "safety pin", "saltshaker, salt shaker", "sandal", "sarong", "sax, saxophone", "scabbard", "scale, weighing machine", "school bus", "schooner", "scoreboard", "screen, crt screen", "screw", "screwdriver", "seat belt, seatbelt", "sewing machine", "shield, buckler", "shoe shop, shoe-shop, shoe store", "shoji", "shopping basket", "shopping cart", "shovel", "shower cap", "shower curtain", "ski", "ski mask", "sleeping bag", "slide rule, slipstick", "sliding door", "slot, one-armed bandit", "snorkel", "snowmobile", "snowplow, snowplough", "soap dispenser", "soccer ball", "sock", "solar dish, solar collector, solar furnace", "sombrero", "soup bowl", "space bar", "space heater", "space shuttle", "spatula", "speedboat", "spider web, spider's web", "spindle", "sports car, sport car", "spotlight, spot", "stage", "steam locomotive", "steel arch bridge", "steel drum", "stethoscope", "stole", "stone wall", "stopwatch, stop watch", "stove", "strainer", "streetcar, tram, tramcar, trolley, trolley car", "stretcher", "studio couch, day bed", "stupa, tope", "submarine, pigboat, sub, u-boat", "suit, suit of clothes", 
"sundial", "sunglass", "sunglasses, dark glasses, shades", "sunscreen, sunblock, sun blocker", "suspension bridge", "swab, swob, mop", "sweatshirt", "swimming trunks, bathing trunks", "swing", "switch, electric switch, electrical switch", "syringe", "table lamp", "tank, army tank, armored combat vehicle, armoured combat vehicle", "tape player", "teapot", "teddy, teddy bear", "television, television system", "tennis ball", "thatch, thatched roof", "theater curtain, theatre curtain", "thimble", "thresher, thrasher, threshing machine", "throne", "tile roof", "toaster", "tobacco shop, tobacconist shop, tobacconist", "toilet seat", "torch", "totem pole", "tow truck, tow car, wrecker", "toyshop", "tractor", "trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi", "tray", "trench coat", "tricycle, trike, velocipede", "trimaran", "tripod", "triumphal arch", "trolleybus, trolley coach, trackless trolley", "trombone", "tub, vat", "turnstile", "typewriter keyboard", "umbrella", "unicycle, monocycle", "upright, upright piano", "vacuum, vacuum cleaner", "vase", "vault", "velvet", "vending machine", "vestment", "viaduct", "violin, fiddle", "volleyball", "waffle iron", "wall clock", "wallet, billfold, notecase, pocketbook", "wardrobe, closet, press", "warplane, military plane", "washbasin, handbasin, washbowl, lavabo, wash-hand basin", "washer, automatic washer, washing machine", "water bottle", "water jug", "water tower", "whiskey jug", "whistle", "wig", "window screen", "window shade", "windsor tie", "wine bottle", "wing", "wok", "wooden spoon", "wool, woolen, woollen", "worm fence, snake fence, snake-rail fence, virginia fence", "wreck", "yawl", "yurt", "web site, website, internet site, site", "comic book", "crossword puzzle, crossword", "street sign", "traffic light, traffic signal, stoplight", "book jacket, dust cover, dust jacket, dust wrapper", "menu", "plate", "guacamole", "consomme", "hot pot, hotpot", "trifle", "ice cream, icecream", "ice lolly, lolly, lollipop, popsicle", "french loaf", "bagel, beigel", "pretzel", "cheeseburger", "hotdog, hot dog, red hot", "mashed potato", "head cabbage", "broccoli", "cauliflower", "zucchini, courgette", "spaghetti squash", "acorn squash", "butternut squash", "cucumber, cuke", "artichoke, globe artichoke", "bell pepper", "cardoon", "mushroom", "granny smith", "strawberry", "orange", "lemon", "fig", "pineapple, ananas", "banana", "jackfruit, jak, jack", "custard apple", "pomegranate", "hay", "carbonara", "chocolate sauce, chocolate syrup", "dough", "meat loaf, meatloaf", "pizza, pizza pie", "potpie", "burrito", "red wine", "espresso", "cup", "eggnog", "alp", "bubble", "cliff, drop, drop-off", "coral reef", "geyser", "lakeside, lakeshore", "promontory, headland, head, foreland", "sandbar, sand bar", "seashore, coast, seacoast, sea-coast", "valley, vale", "volcano", "ballplayer, baseball player", "groom, bridegroom", "scuba diver", "rapeseed", "daisy", "yellow lady's slipper, yellow lady-slipper, cypripedium calceolus, cypripedium parviflorum", "corn", "acorn", "hip, rose hip, rosehip", "buckeye, horse chestnut, conker", "coral fungus", "agaric", "gyromitra", "stinkhorn, carrion fungus", "earthstar", "hen-of-the-woods, hen of the woods, polyporus frondosus, grifola frondosa", "bolete", "ear, spike, capitulum", "toilet tissue, toilet paper, bathroom tissue" ]
facebook/dinov2-base-imagenet1k-1-layer
# Vision Transformer (base-sized model) trained using DINOv2

Vision Transformer (ViT) model trained using the DINOv2 method. It was introduced in the paper [DINOv2: Learning Robust Visual Features without Supervision](https://arxiv.org/abs/2304.07193) by Oquab et al. and first released in [this repository](https://github.com/facebookresearch/dinov2).

Disclaimer: The team releasing DINOv2 did not write a model card for this model so this model card has been written by the Hugging Face team.

## Model description

The Vision Transformer (ViT) is a transformer encoder model (BERT-like) pretrained on a large collection of images in a self-supervised fashion. Images are presented to the model as a sequence of fixed-size patches, which are linearly embedded. A [CLS] token is added to the beginning of the sequence for use in classification tasks, and absolute position embeddings are added before feeding the sequence to the layers of the Transformer encoder. Note that, unlike the plain DINOv2 backbones, this checkpoint does include a classification head: a single linear layer trained on ImageNet-1k (the "1-layer" in the name).

Through pre-training, the model learns an inner representation of images that can then be used to extract features useful for downstream tasks: if you have a dataset of labeled images, for instance, you can train a standard classifier by placing a linear layer on top of the pre-trained encoder. One typically places a linear layer on top of the [CLS] token, as the last hidden state of this token can be seen as a representation of the entire image.

## Intended uses & limitations

You can use the model for classifying an image among one of the [1000 ImageNet labels](https://huggingface.co/datasets/huggingface/label-files/blob/main/imagenet-1k-id2label.json). See the [model hub](https://huggingface.co/models?search=facebook/dinov2) to look for other fine-tuned versions on a task that interests you.

### How to use

Here is how to use this model:

```python
from transformers import AutoImageProcessor, AutoModelForImageClassification
from PIL import Image
import requests

url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained('facebook/dinov2-base-imagenet1k-1-layer')
model = AutoModelForImageClassification.from_pretrained('facebook/dinov2-base-imagenet1k-1-layer')

inputs = processor(images=image, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits

predicted_class_idx = logits.argmax(-1).item()
print("Predicted class:", model.config.id2label[predicted_class_idx])
```

### BibTeX entry and citation info

```bibtex
@misc{oquab2023dinov2,
      title={DINOv2: Learning Robust Visual Features without Supervision},
      author={Maxime Oquab and Timothée Darcet and Théo Moutakanni and Huy Vo and Marc Szafraniec and Vasil Khalidov and Pierre Fernandez and Daniel Haziza and Francisco Massa and Alaaeldin El-Nouby and Mahmoud Assran and Nicolas Ballas and Wojciech Galuba and Russell Howes and Po-Yao Huang and Shang-Wen Li and Ishan Misra and Michael Rabbat and Vasu Sharma and Gabriel Synnaeve and Hu Xu and Hervé Jegou and Julien Mairal and Patrick Labatut and Armand Joulin and Piotr Bojanowski},
      year={2023},
      eprint={2304.07193},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
```
[ "tench, tinca tinca", "goldfish, carassius auratus", "great white shark, white shark, man-eater, man-eating shark, carcharodon carcharias", "tiger shark, galeocerdo cuvieri", "hammerhead, hammerhead shark", "electric ray, crampfish, numbfish, torpedo", "stingray", "cock", "hen", "ostrich, struthio camelus", "brambling, fringilla montifringilla", "goldfinch, carduelis carduelis", "house finch, linnet, carpodacus mexicanus", "junco, snowbird", "indigo bunting, indigo finch, indigo bird, passerina cyanea", "robin, american robin, turdus migratorius", "bulbul", "jay", "magpie", "chickadee", "water ouzel, dipper", "kite", "bald eagle, american eagle, haliaeetus leucocephalus", "vulture", "great grey owl, great gray owl, strix nebulosa", "european fire salamander, salamandra salamandra", "common newt, triturus vulgaris", "eft", "spotted salamander, ambystoma maculatum", "axolotl, mud puppy, ambystoma mexicanum", "bullfrog, rana catesbeiana", "tree frog, tree-frog", "tailed frog, bell toad, ribbed toad, tailed toad, ascaphus trui", "loggerhead, loggerhead turtle, caretta caretta", "leatherback turtle, leatherback, leathery turtle, dermochelys coriacea", "mud turtle", "terrapin", "box turtle, box tortoise", "banded gecko", "common iguana, iguana, iguana iguana", "american chameleon, anole, anolis carolinensis", "whiptail, whiptail lizard", "agama", "frilled lizard, chlamydosaurus kingi", "alligator lizard", "gila monster, heloderma suspectum", "green lizard, lacerta viridis", "african chameleon, chamaeleo chamaeleon", "komodo dragon, komodo lizard, dragon lizard, giant lizard, varanus komodoensis", "african crocodile, nile crocodile, crocodylus niloticus", "american alligator, alligator mississipiensis", "triceratops", "thunder snake, worm snake, carphophis amoenus", "ringneck snake, ring-necked snake, ring snake", "hognose snake, puff adder, sand viper", "green snake, grass snake", "king snake, kingsnake", "garter snake, grass snake", "water snake", "vine snake", "night snake, hypsiglena torquata", "boa constrictor, constrictor constrictor", "rock python, rock snake, python sebae", "indian cobra, naja naja", "green mamba", "sea snake", "horned viper, cerastes, sand viper, horned asp, cerastes cornutus", "diamondback, diamondback rattlesnake, crotalus adamanteus", "sidewinder, horned rattlesnake, crotalus cerastes", "trilobite", "harvestman, daddy longlegs, phalangium opilio", "scorpion", "black and gold garden spider, argiope aurantia", "barn spider, araneus cavaticus", "garden spider, aranea diademata", "black widow, latrodectus mactans", "tarantula", "wolf spider, hunting spider", "tick", "centipede", "black grouse", "ptarmigan", "ruffed grouse, partridge, bonasa umbellus", "prairie chicken, prairie grouse, prairie fowl", "peacock", "quail", "partridge", "african grey, african gray, psittacus erithacus", "macaw", "sulphur-crested cockatoo, kakatoe galerita, cacatua galerita", "lorikeet", "coucal", "bee eater", "hornbill", "hummingbird", "jacamar", "toucan", "drake", "red-breasted merganser, mergus serrator", "goose", "black swan, cygnus atratus", "tusker", "echidna, spiny anteater, anteater", "platypus, duckbill, duckbilled platypus, duck-billed platypus, ornithorhynchus anatinus", "wallaby, brush kangaroo", "koala, koala bear, kangaroo bear, native bear, phascolarctos cinereus", "wombat", "jellyfish", "sea anemone, anemone", "brain coral", "flatworm, platyhelminth", "nematode, nematode worm, roundworm", "conch", "snail", "slug", "sea slug, nudibranch", "chiton, coat-of-mail shell, sea 
cradle, polyplacophore", "chambered nautilus, pearly nautilus, nautilus", "dungeness crab, cancer magister", "rock crab, cancer irroratus", "fiddler crab", "king crab, alaska crab, alaskan king crab, alaska king crab, paralithodes camtschatica", "american lobster, northern lobster, maine lobster, homarus americanus", "spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish", "crayfish, crawfish, crawdad, crawdaddy", "hermit crab", "isopod", "white stork, ciconia ciconia", "black stork, ciconia nigra", "spoonbill", "flamingo", "little blue heron, egretta caerulea", "american egret, great white heron, egretta albus", "bittern", "crane", "limpkin, aramus pictus", "european gallinule, porphyrio porphyrio", "american coot, marsh hen, mud hen, water hen, fulica americana", "bustard", "ruddy turnstone, arenaria interpres", "red-backed sandpiper, dunlin, erolia alpina", "redshank, tringa totanus", "dowitcher", "oystercatcher, oyster catcher", "pelican", "king penguin, aptenodytes patagonica", "albatross, mollymawk", "grey whale, gray whale, devilfish, eschrichtius gibbosus, eschrichtius robustus", "killer whale, killer, orca, grampus, sea wolf, orcinus orca", "dugong, dugong dugon", "sea lion", "chihuahua", "japanese spaniel", "maltese dog, maltese terrier, maltese", "pekinese, pekingese, peke", "shih-tzu", "blenheim spaniel", "papillon", "toy terrier", "rhodesian ridgeback", "afghan hound, afghan", "basset, basset hound", "beagle", "bloodhound, sleuthhound", "bluetick", "black-and-tan coonhound", "walker hound, walker foxhound", "english foxhound", "redbone", "borzoi, russian wolfhound", "irish wolfhound", "italian greyhound", "whippet", "ibizan hound, ibizan podenco", "norwegian elkhound, elkhound", "otterhound, otter hound", "saluki, gazelle hound", "scottish deerhound, deerhound", "weimaraner", "staffordshire bullterrier, staffordshire bull terrier", "american staffordshire terrier, staffordshire terrier, american pit bull terrier, pit bull terrier", "bedlington terrier", "border terrier", "kerry blue terrier", "irish terrier", "norfolk terrier", "norwich terrier", "yorkshire terrier", "wire-haired fox terrier", "lakeland terrier", "sealyham terrier, sealyham", "airedale, airedale terrier", "cairn, cairn terrier", "australian terrier", "dandie dinmont, dandie dinmont terrier", "boston bull, boston terrier", "miniature schnauzer", "giant schnauzer", "standard schnauzer", "scotch terrier, scottish terrier, scottie", "tibetan terrier, chrysanthemum dog", "silky terrier, sydney silky", "soft-coated wheaten terrier", "west highland white terrier", "lhasa, lhasa apso", "flat-coated retriever", "curly-coated retriever", "golden retriever", "labrador retriever", "chesapeake bay retriever", "german short-haired pointer", "vizsla, hungarian pointer", "english setter", "irish setter, red setter", "gordon setter", "brittany spaniel", "clumber, clumber spaniel", "english springer, english springer spaniel", "welsh springer spaniel", "cocker spaniel, english cocker spaniel, cocker", "sussex spaniel", "irish water spaniel", "kuvasz", "schipperke", "groenendael", "malinois", "briard", "kelpie", "komondor", "old english sheepdog, bobtail", "shetland sheepdog, shetland sheep dog, shetland", "collie", "border collie", "bouvier des flandres, bouviers des flandres", "rottweiler", "german shepherd, german shepherd dog, german police dog, alsatian", "doberman, doberman pinscher", "miniature pinscher", "greater swiss mountain dog", "bernese mountain dog", "appenzeller", "entlebucher", "boxer", "bull 
mastiff", "tibetan mastiff", "french bulldog", "great dane", "saint bernard, st bernard", "eskimo dog, husky", "malamute, malemute, alaskan malamute", "siberian husky", "dalmatian, coach dog, carriage dog", "affenpinscher, monkey pinscher, monkey dog", "basenji", "pug, pug-dog", "leonberg", "newfoundland, newfoundland dog", "great pyrenees", "samoyed, samoyede", "pomeranian", "chow, chow chow", "keeshond", "brabancon griffon", "pembroke, pembroke welsh corgi", "cardigan, cardigan welsh corgi", "toy poodle", "miniature poodle", "standard poodle", "mexican hairless", "timber wolf, grey wolf, gray wolf, canis lupus", "white wolf, arctic wolf, canis lupus tundrarum", "red wolf, maned wolf, canis rufus, canis niger", "coyote, prairie wolf, brush wolf, canis latrans", "dingo, warrigal, warragal, canis dingo", "dhole, cuon alpinus", "african hunting dog, hyena dog, cape hunting dog, lycaon pictus", "hyena, hyaena", "red fox, vulpes vulpes", "kit fox, vulpes macrotis", "arctic fox, white fox, alopex lagopus", "grey fox, gray fox, urocyon cinereoargenteus", "tabby, tabby cat", "tiger cat", "persian cat", "siamese cat, siamese", "egyptian cat", "cougar, puma, catamount, mountain lion, painter, panther, felis concolor", "lynx, catamount", "leopard, panthera pardus", "snow leopard, ounce, panthera uncia", "jaguar, panther, panthera onca, felis onca", "lion, king of beasts, panthera leo", "tiger, panthera tigris", "cheetah, chetah, acinonyx jubatus", "brown bear, bruin, ursus arctos", "american black bear, black bear, ursus americanus, euarctos americanus", "ice bear, polar bear, ursus maritimus, thalarctos maritimus", "sloth bear, melursus ursinus, ursus ursinus", "mongoose", "meerkat, mierkat", "tiger beetle", "ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle", "ground beetle, carabid beetle", "long-horned beetle, longicorn, longicorn beetle", "leaf beetle, chrysomelid", "dung beetle", "rhinoceros beetle", "weevil", "fly", "bee", "ant, emmet, pismire", "grasshopper, hopper", "cricket", "walking stick, walkingstick, stick insect", "cockroach, roach", "mantis, mantid", "cicada, cicala", "leafhopper", "lacewing, lacewing fly", "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk", "damselfly", "admiral", "ringlet, ringlet butterfly", "monarch, monarch butterfly, milkweed butterfly, danaus plexippus", "cabbage butterfly", "sulphur butterfly, sulfur butterfly", "lycaenid, lycaenid butterfly", "starfish, sea star", "sea urchin", "sea cucumber, holothurian", "wood rabbit, cottontail, cottontail rabbit", "hare", "angora, angora rabbit", "hamster", "porcupine, hedgehog", "fox squirrel, eastern fox squirrel, sciurus niger", "marmot", "beaver", "guinea pig, cavia cobaya", "sorrel", "zebra", "hog, pig, grunter, squealer, sus scrofa", "wild boar, boar, sus scrofa", "warthog", "hippopotamus, hippo, river horse, hippopotamus amphibius", "ox", "water buffalo, water ox, asiatic buffalo, bubalus bubalis", "bison", "ram, tup", "bighorn, bighorn sheep, cimarron, rocky mountain bighorn, rocky mountain sheep, ovis canadensis", "ibex, capra ibex", "hartebeest", "impala, aepyceros melampus", "gazelle", "arabian camel, dromedary, camelus dromedarius", "llama", "weasel", "mink", "polecat, fitch, foulmart, foumart, mustela putorius", "black-footed ferret, ferret, mustela nigripes", "otter", "skunk, polecat, wood pussy", "badger", "armadillo", "three-toed sloth, ai, bradypus tridactylus", "orangutan, orang, orangutang, pongo pygmaeus", "gorilla, 
gorilla gorilla", "chimpanzee, chimp, pan troglodytes", "gibbon, hylobates lar", "siamang, hylobates syndactylus, symphalangus syndactylus", "guenon, guenon monkey", "patas, hussar monkey, erythrocebus patas", "baboon", "macaque", "langur", "colobus, colobus monkey", "proboscis monkey, nasalis larvatus", "marmoset", "capuchin, ringtail, cebus capucinus", "howler monkey, howler", "titi, titi monkey", "spider monkey, ateles geoffroyi", "squirrel monkey, saimiri sciureus", "madagascar cat, ring-tailed lemur, lemur catta", "indri, indris, indri indri, indri brevicaudatus", "indian elephant, elephas maximus", "african elephant, loxodonta africana", "lesser panda, red panda, panda, bear cat, cat bear, ailurus fulgens", "giant panda, panda, panda bear, coon bear, ailuropoda melanoleuca", "barracouta, snoek", "eel", "coho, cohoe, coho salmon, blue jack, silver salmon, oncorhynchus kisutch", "rock beauty, holocanthus tricolor", "anemone fish", "sturgeon", "gar, garfish, garpike, billfish, lepisosteus osseus", "lionfish", "puffer, pufferfish, blowfish, globefish", "abacus", "abaya", "academic gown, academic robe, judge's robe", "accordion, piano accordion, squeeze box", "acoustic guitar", "aircraft carrier, carrier, flattop, attack aircraft carrier", "airliner", "airship, dirigible", "altar", "ambulance", "amphibian, amphibious vehicle", "analog clock", "apiary, bee house", "apron", "ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin", "assault rifle, assault gun", "backpack, back pack, knapsack, packsack, rucksack, haversack", "bakery, bakeshop, bakehouse", "balance beam, beam", "balloon", "ballpoint, ballpoint pen, ballpen, biro", "band aid", "banjo", "bannister, banister, balustrade, balusters, handrail", "barbell", "barber chair", "barbershop", "barn", "barometer", "barrel, cask", "barrow, garden cart, lawn cart, wheelbarrow", "baseball", "basketball", "bassinet", "bassoon", "bathing cap, swimming cap", "bath towel", "bathtub, bathing tub, bath, tub", "beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon", "beacon, lighthouse, beacon light, pharos", "beaker", "bearskin, busby, shako", "beer bottle", "beer glass", "bell cote, bell cot", "bib", "bicycle-built-for-two, tandem bicycle, tandem", "bikini, two-piece", "binder, ring-binder", "binoculars, field glasses, opera glasses", "birdhouse", "boathouse", "bobsled, bobsleigh, bob", "bolo tie, bolo, bola tie, bola", "bonnet, poke bonnet", "bookcase", "bookshop, bookstore, bookstall", "bottlecap", "bow", "bow tie, bow-tie, bowtie", "brass, memorial tablet, plaque", "brassiere, bra, bandeau", "breakwater, groin, groyne, mole, bulwark, seawall, jetty", "breastplate, aegis, egis", "broom", "bucket, pail", "buckle", "bulletproof vest", "bullet train, bullet", "butcher shop, meat market", "cab, hack, taxi, taxicab", "caldron, cauldron", "candle, taper, wax light", "cannon", "canoe", "can opener, tin opener", "cardigan", "car mirror", "carousel, carrousel, merry-go-round, roundabout, whirligig", "carpenter's kit, tool kit", "carton", "car wheel", "cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, atm", "cassette", "cassette player", "castle", "catamaran", "cd player", "cello, violoncello", "cellular telephone, cellular phone, cellphone, cell, mobile phone", "chain", "chainlink fence", "chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour", "chain saw, chainsaw", "chest", "chiffonier, 
commode", "chime, bell, gong", "china cabinet, china closet", "christmas stocking", "church, church building", "cinema, movie theater, movie theatre, movie house, picture palace", "cleaver, meat cleaver, chopper", "cliff dwelling", "cloak", "clog, geta, patten, sabot", "cocktail shaker", "coffee mug", "coffeepot", "coil, spiral, volute, whorl, helix", "combination lock", "computer keyboard, keypad", "confectionery, confectionary, candy store", "container ship, containership, container vessel", "convertible", "corkscrew, bottle screw", "cornet, horn, trumpet, trump", "cowboy boot", "cowboy hat, ten-gallon hat", "cradle", "crane", "crash helmet", "crate", "crib, cot", "crock pot", "croquet ball", "crutch", "cuirass", "dam, dike, dyke", "desk", "desktop computer", "dial telephone, dial phone", "diaper, nappy, napkin", "digital clock", "digital watch", "dining table, board", "dishrag, dishcloth", "dishwasher, dish washer, dishwashing machine", "disk brake, disc brake", "dock, dockage, docking facility", "dogsled, dog sled, dog sleigh", "dome", "doormat, welcome mat", "drilling platform, offshore rig", "drum, membranophone, tympan", "drumstick", "dumbbell", "dutch oven", "electric fan, blower", "electric guitar", "electric locomotive", "entertainment center", "envelope", "espresso maker", "face powder", "feather boa, boa", "file, file cabinet, filing cabinet", "fireboat", "fire engine, fire truck", "fire screen, fireguard", "flagpole, flagstaff", "flute, transverse flute", "folding chair", "football helmet", "forklift", "fountain", "fountain pen", "four-poster", "freight car", "french horn, horn", "frying pan, frypan, skillet", "fur coat", "garbage truck, dustcart", "gasmask, respirator, gas helmet", "gas pump, gasoline pump, petrol pump, island dispenser", "goblet", "go-kart", "golf ball", "golfcart, golf cart", "gondola", "gong, tam-tam", "gown", "grand piano, grand", "greenhouse, nursery, glasshouse", "grille, radiator grille", "grocery store, grocery, food market, market", "guillotine", "hair slide", "hair spray", "half track", "hammer", "hamper", "hand blower, blow dryer, blow drier, hair dryer, hair drier", "hand-held computer, hand-held microcomputer", "handkerchief, hankie, hanky, hankey", "hard disc, hard disk, fixed disk", "harmonica, mouth organ, harp, mouth harp", "harp", "harvester, reaper", "hatchet", "holster", "home theater, home theatre", "honeycomb", "hook, claw", "hoopskirt, crinoline", "horizontal bar, high bar", "horse cart, horse-cart", "hourglass", "ipod", "iron, smoothing iron", "jack-o'-lantern", "jean, blue jean, denim", "jeep, landrover", "jersey, t-shirt, tee shirt", "jigsaw puzzle", "jinrikisha, ricksha, rickshaw", "joystick", "kimono", "knee pad", "knot", "lab coat, laboratory coat", "ladle", "lampshade, lamp shade", "laptop, laptop computer", "lawn mower, mower", "lens cap, lens cover", "letter opener, paper knife, paperknife", "library", "lifeboat", "lighter, light, igniter, ignitor", "limousine, limo", "liner, ocean liner", "lipstick, lip rouge", "loafer", "lotion", "loudspeaker, speaker, speaker unit, loudspeaker system, speaker system", "loupe, jeweler's loupe", "lumbermill, sawmill", "magnetic compass", "mailbag, postbag", "mailbox, letter box", "maillot", "maillot, tank suit", "manhole cover", "maraca", "marimba, xylophone", "mask", "matchstick", "maypole", "maze, labyrinth", "measuring cup", "medicine chest, medicine cabinet", "megalith, megalithic structure", "microphone, mike", "microwave, microwave oven", "military uniform", "milk can", "minibus", 
"miniskirt, mini", "minivan", "missile", "mitten", "mixing bowl", "mobile home, manufactured home", "model t", "modem", "monastery", "monitor", "moped", "mortar", "mortarboard", "mosque", "mosquito net", "motor scooter, scooter", "mountain bike, all-terrain bike, off-roader", "mountain tent", "mouse, computer mouse", "mousetrap", "moving van", "muzzle", "nail", "neck brace", "necklace", "nipple", "notebook, notebook computer", "obelisk", "oboe, hautboy, hautbois", "ocarina, sweet potato", "odometer, hodometer, mileometer, milometer", "oil filter", "organ, pipe organ", "oscilloscope, scope, cathode-ray oscilloscope, cro", "overskirt", "oxcart", "oxygen mask", "packet", "paddle, boat paddle", "paddlewheel, paddle wheel", "padlock", "paintbrush", "pajama, pyjama, pj's, jammies", "palace", "panpipe, pandean pipe, syrinx", "paper towel", "parachute, chute", "parallel bars, bars", "park bench", "parking meter", "passenger car, coach, carriage", "patio, terrace", "pay-phone, pay-station", "pedestal, plinth, footstall", "pencil box, pencil case", "pencil sharpener", "perfume, essence", "petri dish", "photocopier", "pick, plectrum, plectron", "pickelhaube", "picket fence, paling", "pickup, pickup truck", "pier", "piggy bank, penny bank", "pill bottle", "pillow", "ping-pong ball", "pinwheel", "pirate, pirate ship", "pitcher, ewer", "plane, carpenter's plane, woodworking plane", "planetarium", "plastic bag", "plate rack", "plow, plough", "plunger, plumber's helper", "polaroid camera, polaroid land camera", "pole", "police van, police wagon, paddy wagon, patrol wagon, wagon, black maria", "poncho", "pool table, billiard table, snooker table", "pop bottle, soda bottle", "pot, flowerpot", "potter's wheel", "power drill", "prayer rug, prayer mat", "printer", "prison, prison house", "projectile, missile", "projector", "puck, hockey puck", "punching bag, punch bag, punching ball, punchball", "purse", "quill, quill pen", "quilt, comforter, comfort, puff", "racer, race car, racing car", "racket, racquet", "radiator", "radio, wireless", "radio telescope, radio reflector", "rain barrel", "recreational vehicle, rv, r.v.", "reel", "reflex camera", "refrigerator, icebox", "remote control, remote", "restaurant, eating house, eating place, eatery", "revolver, six-gun, six-shooter", "rifle", "rocking chair, rocker", "rotisserie", "rubber eraser, rubber, pencil eraser", "rugby ball", "rule, ruler", "running shoe", "safe", "safety pin", "saltshaker, salt shaker", "sandal", "sarong", "sax, saxophone", "scabbard", "scale, weighing machine", "school bus", "schooner", "scoreboard", "screen, crt screen", "screw", "screwdriver", "seat belt, seatbelt", "sewing machine", "shield, buckler", "shoe shop, shoe-shop, shoe store", "shoji", "shopping basket", "shopping cart", "shovel", "shower cap", "shower curtain", "ski", "ski mask", "sleeping bag", "slide rule, slipstick", "sliding door", "slot, one-armed bandit", "snorkel", "snowmobile", "snowplow, snowplough", "soap dispenser", "soccer ball", "sock", "solar dish, solar collector, solar furnace", "sombrero", "soup bowl", "space bar", "space heater", "space shuttle", "spatula", "speedboat", "spider web, spider's web", "spindle", "sports car, sport car", "spotlight, spot", "stage", "steam locomotive", "steel arch bridge", "steel drum", "stethoscope", "stole", "stone wall", "stopwatch, stop watch", "stove", "strainer", "streetcar, tram, tramcar, trolley, trolley car", "stretcher", "studio couch, day bed", "stupa, tope", "submarine, pigboat, sub, u-boat", "suit, suit of clothes", 
"sundial", "sunglass", "sunglasses, dark glasses, shades", "sunscreen, sunblock, sun blocker", "suspension bridge", "swab, swob, mop", "sweatshirt", "swimming trunks, bathing trunks", "swing", "switch, electric switch, electrical switch", "syringe", "table lamp", "tank, army tank, armored combat vehicle, armoured combat vehicle", "tape player", "teapot", "teddy, teddy bear", "television, television system", "tennis ball", "thatch, thatched roof", "theater curtain, theatre curtain", "thimble", "thresher, thrasher, threshing machine", "throne", "tile roof", "toaster", "tobacco shop, tobacconist shop, tobacconist", "toilet seat", "torch", "totem pole", "tow truck, tow car, wrecker", "toyshop", "tractor", "trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi", "tray", "trench coat", "tricycle, trike, velocipede", "trimaran", "tripod", "triumphal arch", "trolleybus, trolley coach, trackless trolley", "trombone", "tub, vat", "turnstile", "typewriter keyboard", "umbrella", "unicycle, monocycle", "upright, upright piano", "vacuum, vacuum cleaner", "vase", "vault", "velvet", "vending machine", "vestment", "viaduct", "violin, fiddle", "volleyball", "waffle iron", "wall clock", "wallet, billfold, notecase, pocketbook", "wardrobe, closet, press", "warplane, military plane", "washbasin, handbasin, washbowl, lavabo, wash-hand basin", "washer, automatic washer, washing machine", "water bottle", "water jug", "water tower", "whiskey jug", "whistle", "wig", "window screen", "window shade", "windsor tie", "wine bottle", "wing", "wok", "wooden spoon", "wool, woolen, woollen", "worm fence, snake fence, snake-rail fence, virginia fence", "wreck", "yawl", "yurt", "web site, website, internet site, site", "comic book", "crossword puzzle, crossword", "street sign", "traffic light, traffic signal, stoplight", "book jacket, dust cover, dust jacket, dust wrapper", "menu", "plate", "guacamole", "consomme", "hot pot, hotpot", "trifle", "ice cream, icecream", "ice lolly, lolly, lollipop, popsicle", "french loaf", "bagel, beigel", "pretzel", "cheeseburger", "hotdog, hot dog, red hot", "mashed potato", "head cabbage", "broccoli", "cauliflower", "zucchini, courgette", "spaghetti squash", "acorn squash", "butternut squash", "cucumber, cuke", "artichoke, globe artichoke", "bell pepper", "cardoon", "mushroom", "granny smith", "strawberry", "orange", "lemon", "fig", "pineapple, ananas", "banana", "jackfruit, jak, jack", "custard apple", "pomegranate", "hay", "carbonara", "chocolate sauce, chocolate syrup", "dough", "meat loaf, meatloaf", "pizza, pizza pie", "potpie", "burrito", "red wine", "espresso", "cup", "eggnog", "alp", "bubble", "cliff, drop, drop-off", "coral reef", "geyser", "lakeside, lakeshore", "promontory, headland, head, foreland", "sandbar, sand bar", "seashore, coast, seacoast, sea-coast", "valley, vale", "volcano", "ballplayer, baseball player", "groom, bridegroom", "scuba diver", "rapeseed", "daisy", "yellow lady's slipper, yellow lady-slipper, cypripedium calceolus, cypripedium parviflorum", "corn", "acorn", "hip, rose hip, rosehip", "buckeye, horse chestnut, conker", "coral fungus", "agaric", "gyromitra", "stinkhorn, carrion fungus", "earthstar", "hen-of-the-woods, hen of the woods, polyporus frondosus, grifola frondosa", "bolete", "ear, spike, capitulum", "toilet tissue, toilet paper, bathroom tissue" ]
facebook/dinov2-large-imagenet1k-1-layer
# Vision Transformer (large-sized model) trained using DINOv2

Vision Transformer (ViT) model trained using the DINOv2 method. It was introduced in the paper [DINOv2: Learning Robust Visual Features without Supervision](https://arxiv.org/abs/2304.07193) by Oquab et al. and first released in [this repository](https://github.com/facebookresearch/dinov2).

Disclaimer: The team releasing DINOv2 did not write a model card for this model, so this model card has been written by the Hugging Face team.

## Model description

The Vision Transformer (ViT) is a transformer encoder model (BERT-like) pretrained on a large collection of images in a self-supervised fashion. Images are presented to the model as a sequence of fixed-size patches, which are linearly embedded. A [CLS] token is added to the beginning of the sequence for use in classification tasks, and absolute position embeddings are added before the sequence is fed to the layers of the Transformer encoder.

Note that the backbone was pretrained without labels; this checkpoint adds a single linear classification layer (the "1-layer" in its name) trained on ImageNet-1k on top of the frozen features.

Through pre-training, the model learns an inner representation of images that can then be used to extract features for downstream tasks: given a dataset of labeled images, for instance, you can train a standard classifier by placing a linear layer on top of the pretrained encoder. One typically places the linear layer on top of the [CLS] token, as the last hidden state of this token can be seen as a representation of the entire image (a minimal sketch of this setup follows this card).

## Intended uses & limitations

You can use the model to classify an image into one of the [1000 ImageNet labels](https://huggingface.co/datasets/huggingface/label-files/blob/main/imagenet-1k-id2label.json). See the [model hub](https://huggingface.co/models?search=facebook/dinov2) to look for other fine-tuned versions on a task that interests you.

### How to use

Here is how to use this model:

```python
from transformers import AutoImageProcessor, AutoModelForImageClassification
from PIL import Image
import requests

url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained('facebook/dinov2-large-imagenet1k-1-layer')
model = AutoModelForImageClassification.from_pretrained('facebook/dinov2-large-imagenet1k-1-layer')

inputs = processor(images=image, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits

predicted_class_idx = logits.argmax(-1).item()
print("Predicted class:", model.config.id2label[predicted_class_idx])
```

### BibTeX entry and citation info

```bibtex
@misc{oquab2023dinov2,
      title={DINOv2: Learning Robust Visual Features without Supervision},
      author={Maxime Oquab and Timothée Darcet and Théo Moutakanni and Huy Vo and Marc Szafraniec and Vasil Khalidov and Pierre Fernandez and Daniel Haziza and Francisco Massa and Alaaeldin El-Nouby and Mahmoud Assran and Nicolas Ballas and Wojciech Galuba and Russell Howes and Po-Yao Huang and Shang-Wen Li and Ishan Misra and Michael Rabbat and Vasu Sharma and Gabriel Synnaeve and Hu Xu and Hervé Jegou and Julien Mairal and Patrick Labatut and Armand Joulin and Piotr Bojanowski},
      year={2023},
      eprint={2304.07193},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
```
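To make the linear-probe idea above concrete, here is a minimal sketch of extracting frozen [CLS] features from the self-supervised backbone and placing a fresh linear layer on top. The backbone checkpoint name `facebook/dinov2-large` and the 1,000-class head size are illustrative assumptions, not part of the original card.

```python
# Minimal linear-probe sketch (assumptions: 'facebook/dinov2-large' backbone,
# a 1000-class label space; the head below is untrained, for illustration only).
import torch
import requests
from PIL import Image
from transformers import AutoImageProcessor, AutoModel

processor = AutoImageProcessor.from_pretrained('facebook/dinov2-large')
backbone = AutoModel.from_pretrained('facebook/dinov2-large')
backbone.eval()  # the backbone stays frozen; only the head would be trained

url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
image = Image.open(requests.get(url, stream=True).raw)

with torch.no_grad():
    inputs = processor(images=image, return_tensors="pt")
    # last_hidden_state has shape (batch, 1 + num_patches, hidden_size);
    # index 0 along the sequence axis is the [CLS] token.
    cls_features = backbone(**inputs).last_hidden_state[:, 0]

head = torch.nn.Linear(backbone.config.hidden_size, 1000)  # the "1 layer"
logits = head(cls_features)
print(logits.shape)  # torch.Size([1, 1000])
```

Training such a head with cross-entropy on frozen features corresponds to the linear evaluation setup described in the model description above.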
[ "tench, tinca tinca", "goldfish, carassius auratus", "great white shark, white shark, man-eater, man-eating shark, carcharodon carcharias", "tiger shark, galeocerdo cuvieri", "hammerhead, hammerhead shark", "electric ray, crampfish, numbfish, torpedo", "stingray", "cock", "hen", "ostrich, struthio camelus", "brambling, fringilla montifringilla", "goldfinch, carduelis carduelis", "house finch, linnet, carpodacus mexicanus", "junco, snowbird", "indigo bunting, indigo finch, indigo bird, passerina cyanea", "robin, american robin, turdus migratorius", "bulbul", "jay", "magpie", "chickadee", "water ouzel, dipper", "kite", "bald eagle, american eagle, haliaeetus leucocephalus", "vulture", "great grey owl, great gray owl, strix nebulosa", "european fire salamander, salamandra salamandra", "common newt, triturus vulgaris", "eft", "spotted salamander, ambystoma maculatum", "axolotl, mud puppy, ambystoma mexicanum", "bullfrog, rana catesbeiana", "tree frog, tree-frog", "tailed frog, bell toad, ribbed toad, tailed toad, ascaphus trui", "loggerhead, loggerhead turtle, caretta caretta", "leatherback turtle, leatherback, leathery turtle, dermochelys coriacea", "mud turtle", "terrapin", "box turtle, box tortoise", "banded gecko", "common iguana, iguana, iguana iguana", "american chameleon, anole, anolis carolinensis", "whiptail, whiptail lizard", "agama", "frilled lizard, chlamydosaurus kingi", "alligator lizard", "gila monster, heloderma suspectum", "green lizard, lacerta viridis", "african chameleon, chamaeleo chamaeleon", "komodo dragon, komodo lizard, dragon lizard, giant lizard, varanus komodoensis", "african crocodile, nile crocodile, crocodylus niloticus", "american alligator, alligator mississipiensis", "triceratops", "thunder snake, worm snake, carphophis amoenus", "ringneck snake, ring-necked snake, ring snake", "hognose snake, puff adder, sand viper", "green snake, grass snake", "king snake, kingsnake", "garter snake, grass snake", "water snake", "vine snake", "night snake, hypsiglena torquata", "boa constrictor, constrictor constrictor", "rock python, rock snake, python sebae", "indian cobra, naja naja", "green mamba", "sea snake", "horned viper, cerastes, sand viper, horned asp, cerastes cornutus", "diamondback, diamondback rattlesnake, crotalus adamanteus", "sidewinder, horned rattlesnake, crotalus cerastes", "trilobite", "harvestman, daddy longlegs, phalangium opilio", "scorpion", "black and gold garden spider, argiope aurantia", "barn spider, araneus cavaticus", "garden spider, aranea diademata", "black widow, latrodectus mactans", "tarantula", "wolf spider, hunting spider", "tick", "centipede", "black grouse", "ptarmigan", "ruffed grouse, partridge, bonasa umbellus", "prairie chicken, prairie grouse, prairie fowl", "peacock", "quail", "partridge", "african grey, african gray, psittacus erithacus", "macaw", "sulphur-crested cockatoo, kakatoe galerita, cacatua galerita", "lorikeet", "coucal", "bee eater", "hornbill", "hummingbird", "jacamar", "toucan", "drake", "red-breasted merganser, mergus serrator", "goose", "black swan, cygnus atratus", "tusker", "echidna, spiny anteater, anteater", "platypus, duckbill, duckbilled platypus, duck-billed platypus, ornithorhynchus anatinus", "wallaby, brush kangaroo", "koala, koala bear, kangaroo bear, native bear, phascolarctos cinereus", "wombat", "jellyfish", "sea anemone, anemone", "brain coral", "flatworm, platyhelminth", "nematode, nematode worm, roundworm", "conch", "snail", "slug", "sea slug, nudibranch", "chiton, coat-of-mail shell, sea 
cradle, polyplacophore", "chambered nautilus, pearly nautilus, nautilus", "dungeness crab, cancer magister", "rock crab, cancer irroratus", "fiddler crab", "king crab, alaska crab, alaskan king crab, alaska king crab, paralithodes camtschatica", "american lobster, northern lobster, maine lobster, homarus americanus", "spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish", "crayfish, crawfish, crawdad, crawdaddy", "hermit crab", "isopod", "white stork, ciconia ciconia", "black stork, ciconia nigra", "spoonbill", "flamingo", "little blue heron, egretta caerulea", "american egret, great white heron, egretta albus", "bittern", "crane", "limpkin, aramus pictus", "european gallinule, porphyrio porphyrio", "american coot, marsh hen, mud hen, water hen, fulica americana", "bustard", "ruddy turnstone, arenaria interpres", "red-backed sandpiper, dunlin, erolia alpina", "redshank, tringa totanus", "dowitcher", "oystercatcher, oyster catcher", "pelican", "king penguin, aptenodytes patagonica", "albatross, mollymawk", "grey whale, gray whale, devilfish, eschrichtius gibbosus, eschrichtius robustus", "killer whale, killer, orca, grampus, sea wolf, orcinus orca", "dugong, dugong dugon", "sea lion", "chihuahua", "japanese spaniel", "maltese dog, maltese terrier, maltese", "pekinese, pekingese, peke", "shih-tzu", "blenheim spaniel", "papillon", "toy terrier", "rhodesian ridgeback", "afghan hound, afghan", "basset, basset hound", "beagle", "bloodhound, sleuthhound", "bluetick", "black-and-tan coonhound", "walker hound, walker foxhound", "english foxhound", "redbone", "borzoi, russian wolfhound", "irish wolfhound", "italian greyhound", "whippet", "ibizan hound, ibizan podenco", "norwegian elkhound, elkhound", "otterhound, otter hound", "saluki, gazelle hound", "scottish deerhound, deerhound", "weimaraner", "staffordshire bullterrier, staffordshire bull terrier", "american staffordshire terrier, staffordshire terrier, american pit bull terrier, pit bull terrier", "bedlington terrier", "border terrier", "kerry blue terrier", "irish terrier", "norfolk terrier", "norwich terrier", "yorkshire terrier", "wire-haired fox terrier", "lakeland terrier", "sealyham terrier, sealyham", "airedale, airedale terrier", "cairn, cairn terrier", "australian terrier", "dandie dinmont, dandie dinmont terrier", "boston bull, boston terrier", "miniature schnauzer", "giant schnauzer", "standard schnauzer", "scotch terrier, scottish terrier, scottie", "tibetan terrier, chrysanthemum dog", "silky terrier, sydney silky", "soft-coated wheaten terrier", "west highland white terrier", "lhasa, lhasa apso", "flat-coated retriever", "curly-coated retriever", "golden retriever", "labrador retriever", "chesapeake bay retriever", "german short-haired pointer", "vizsla, hungarian pointer", "english setter", "irish setter, red setter", "gordon setter", "brittany spaniel", "clumber, clumber spaniel", "english springer, english springer spaniel", "welsh springer spaniel", "cocker spaniel, english cocker spaniel, cocker", "sussex spaniel", "irish water spaniel", "kuvasz", "schipperke", "groenendael", "malinois", "briard", "kelpie", "komondor", "old english sheepdog, bobtail", "shetland sheepdog, shetland sheep dog, shetland", "collie", "border collie", "bouvier des flandres, bouviers des flandres", "rottweiler", "german shepherd, german shepherd dog, german police dog, alsatian", "doberman, doberman pinscher", "miniature pinscher", "greater swiss mountain dog", "bernese mountain dog", "appenzeller", "entlebucher", "boxer", "bull 
mastiff", "tibetan mastiff", "french bulldog", "great dane", "saint bernard, st bernard", "eskimo dog, husky", "malamute, malemute, alaskan malamute", "siberian husky", "dalmatian, coach dog, carriage dog", "affenpinscher, monkey pinscher, monkey dog", "basenji", "pug, pug-dog", "leonberg", "newfoundland, newfoundland dog", "great pyrenees", "samoyed, samoyede", "pomeranian", "chow, chow chow", "keeshond", "brabancon griffon", "pembroke, pembroke welsh corgi", "cardigan, cardigan welsh corgi", "toy poodle", "miniature poodle", "standard poodle", "mexican hairless", "timber wolf, grey wolf, gray wolf, canis lupus", "white wolf, arctic wolf, canis lupus tundrarum", "red wolf, maned wolf, canis rufus, canis niger", "coyote, prairie wolf, brush wolf, canis latrans", "dingo, warrigal, warragal, canis dingo", "dhole, cuon alpinus", "african hunting dog, hyena dog, cape hunting dog, lycaon pictus", "hyena, hyaena", "red fox, vulpes vulpes", "kit fox, vulpes macrotis", "arctic fox, white fox, alopex lagopus", "grey fox, gray fox, urocyon cinereoargenteus", "tabby, tabby cat", "tiger cat", "persian cat", "siamese cat, siamese", "egyptian cat", "cougar, puma, catamount, mountain lion, painter, panther, felis concolor", "lynx, catamount", "leopard, panthera pardus", "snow leopard, ounce, panthera uncia", "jaguar, panther, panthera onca, felis onca", "lion, king of beasts, panthera leo", "tiger, panthera tigris", "cheetah, chetah, acinonyx jubatus", "brown bear, bruin, ursus arctos", "american black bear, black bear, ursus americanus, euarctos americanus", "ice bear, polar bear, ursus maritimus, thalarctos maritimus", "sloth bear, melursus ursinus, ursus ursinus", "mongoose", "meerkat, mierkat", "tiger beetle", "ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle", "ground beetle, carabid beetle", "long-horned beetle, longicorn, longicorn beetle", "leaf beetle, chrysomelid", "dung beetle", "rhinoceros beetle", "weevil", "fly", "bee", "ant, emmet, pismire", "grasshopper, hopper", "cricket", "walking stick, walkingstick, stick insect", "cockroach, roach", "mantis, mantid", "cicada, cicala", "leafhopper", "lacewing, lacewing fly", "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk", "damselfly", "admiral", "ringlet, ringlet butterfly", "monarch, monarch butterfly, milkweed butterfly, danaus plexippus", "cabbage butterfly", "sulphur butterfly, sulfur butterfly", "lycaenid, lycaenid butterfly", "starfish, sea star", "sea urchin", "sea cucumber, holothurian", "wood rabbit, cottontail, cottontail rabbit", "hare", "angora, angora rabbit", "hamster", "porcupine, hedgehog", "fox squirrel, eastern fox squirrel, sciurus niger", "marmot", "beaver", "guinea pig, cavia cobaya", "sorrel", "zebra", "hog, pig, grunter, squealer, sus scrofa", "wild boar, boar, sus scrofa", "warthog", "hippopotamus, hippo, river horse, hippopotamus amphibius", "ox", "water buffalo, water ox, asiatic buffalo, bubalus bubalis", "bison", "ram, tup", "bighorn, bighorn sheep, cimarron, rocky mountain bighorn, rocky mountain sheep, ovis canadensis", "ibex, capra ibex", "hartebeest", "impala, aepyceros melampus", "gazelle", "arabian camel, dromedary, camelus dromedarius", "llama", "weasel", "mink", "polecat, fitch, foulmart, foumart, mustela putorius", "black-footed ferret, ferret, mustela nigripes", "otter", "skunk, polecat, wood pussy", "badger", "armadillo", "three-toed sloth, ai, bradypus tridactylus", "orangutan, orang, orangutang, pongo pygmaeus", "gorilla, 
gorilla gorilla", "chimpanzee, chimp, pan troglodytes", "gibbon, hylobates lar", "siamang, hylobates syndactylus, symphalangus syndactylus", "guenon, guenon monkey", "patas, hussar monkey, erythrocebus patas", "baboon", "macaque", "langur", "colobus, colobus monkey", "proboscis monkey, nasalis larvatus", "marmoset", "capuchin, ringtail, cebus capucinus", "howler monkey, howler", "titi, titi monkey", "spider monkey, ateles geoffroyi", "squirrel monkey, saimiri sciureus", "madagascar cat, ring-tailed lemur, lemur catta", "indri, indris, indri indri, indri brevicaudatus", "indian elephant, elephas maximus", "african elephant, loxodonta africana", "lesser panda, red panda, panda, bear cat, cat bear, ailurus fulgens", "giant panda, panda, panda bear, coon bear, ailuropoda melanoleuca", "barracouta, snoek", "eel", "coho, cohoe, coho salmon, blue jack, silver salmon, oncorhynchus kisutch", "rock beauty, holocanthus tricolor", "anemone fish", "sturgeon", "gar, garfish, garpike, billfish, lepisosteus osseus", "lionfish", "puffer, pufferfish, blowfish, globefish", "abacus", "abaya", "academic gown, academic robe, judge's robe", "accordion, piano accordion, squeeze box", "acoustic guitar", "aircraft carrier, carrier, flattop, attack aircraft carrier", "airliner", "airship, dirigible", "altar", "ambulance", "amphibian, amphibious vehicle", "analog clock", "apiary, bee house", "apron", "ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin", "assault rifle, assault gun", "backpack, back pack, knapsack, packsack, rucksack, haversack", "bakery, bakeshop, bakehouse", "balance beam, beam", "balloon", "ballpoint, ballpoint pen, ballpen, biro", "band aid", "banjo", "bannister, banister, balustrade, balusters, handrail", "barbell", "barber chair", "barbershop", "barn", "barometer", "barrel, cask", "barrow, garden cart, lawn cart, wheelbarrow", "baseball", "basketball", "bassinet", "bassoon", "bathing cap, swimming cap", "bath towel", "bathtub, bathing tub, bath, tub", "beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon", "beacon, lighthouse, beacon light, pharos", "beaker", "bearskin, busby, shako", "beer bottle", "beer glass", "bell cote, bell cot", "bib", "bicycle-built-for-two, tandem bicycle, tandem", "bikini, two-piece", "binder, ring-binder", "binoculars, field glasses, opera glasses", "birdhouse", "boathouse", "bobsled, bobsleigh, bob", "bolo tie, bolo, bola tie, bola", "bonnet, poke bonnet", "bookcase", "bookshop, bookstore, bookstall", "bottlecap", "bow", "bow tie, bow-tie, bowtie", "brass, memorial tablet, plaque", "brassiere, bra, bandeau", "breakwater, groin, groyne, mole, bulwark, seawall, jetty", "breastplate, aegis, egis", "broom", "bucket, pail", "buckle", "bulletproof vest", "bullet train, bullet", "butcher shop, meat market", "cab, hack, taxi, taxicab", "caldron, cauldron", "candle, taper, wax light", "cannon", "canoe", "can opener, tin opener", "cardigan", "car mirror", "carousel, carrousel, merry-go-round, roundabout, whirligig", "carpenter's kit, tool kit", "carton", "car wheel", "cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, atm", "cassette", "cassette player", "castle", "catamaran", "cd player", "cello, violoncello", "cellular telephone, cellular phone, cellphone, cell, mobile phone", "chain", "chainlink fence", "chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour", "chain saw, chainsaw", "chest", "chiffonier, 
commode", "chime, bell, gong", "china cabinet, china closet", "christmas stocking", "church, church building", "cinema, movie theater, movie theatre, movie house, picture palace", "cleaver, meat cleaver, chopper", "cliff dwelling", "cloak", "clog, geta, patten, sabot", "cocktail shaker", "coffee mug", "coffeepot", "coil, spiral, volute, whorl, helix", "combination lock", "computer keyboard, keypad", "confectionery, confectionary, candy store", "container ship, containership, container vessel", "convertible", "corkscrew, bottle screw", "cornet, horn, trumpet, trump", "cowboy boot", "cowboy hat, ten-gallon hat", "cradle", "crane", "crash helmet", "crate", "crib, cot", "crock pot", "croquet ball", "crutch", "cuirass", "dam, dike, dyke", "desk", "desktop computer", "dial telephone, dial phone", "diaper, nappy, napkin", "digital clock", "digital watch", "dining table, board", "dishrag, dishcloth", "dishwasher, dish washer, dishwashing machine", "disk brake, disc brake", "dock, dockage, docking facility", "dogsled, dog sled, dog sleigh", "dome", "doormat, welcome mat", "drilling platform, offshore rig", "drum, membranophone, tympan", "drumstick", "dumbbell", "dutch oven", "electric fan, blower", "electric guitar", "electric locomotive", "entertainment center", "envelope", "espresso maker", "face powder", "feather boa, boa", "file, file cabinet, filing cabinet", "fireboat", "fire engine, fire truck", "fire screen, fireguard", "flagpole, flagstaff", "flute, transverse flute", "folding chair", "football helmet", "forklift", "fountain", "fountain pen", "four-poster", "freight car", "french horn, horn", "frying pan, frypan, skillet", "fur coat", "garbage truck, dustcart", "gasmask, respirator, gas helmet", "gas pump, gasoline pump, petrol pump, island dispenser", "goblet", "go-kart", "golf ball", "golfcart, golf cart", "gondola", "gong, tam-tam", "gown", "grand piano, grand", "greenhouse, nursery, glasshouse", "grille, radiator grille", "grocery store, grocery, food market, market", "guillotine", "hair slide", "hair spray", "half track", "hammer", "hamper", "hand blower, blow dryer, blow drier, hair dryer, hair drier", "hand-held computer, hand-held microcomputer", "handkerchief, hankie, hanky, hankey", "hard disc, hard disk, fixed disk", "harmonica, mouth organ, harp, mouth harp", "harp", "harvester, reaper", "hatchet", "holster", "home theater, home theatre", "honeycomb", "hook, claw", "hoopskirt, crinoline", "horizontal bar, high bar", "horse cart, horse-cart", "hourglass", "ipod", "iron, smoothing iron", "jack-o'-lantern", "jean, blue jean, denim", "jeep, landrover", "jersey, t-shirt, tee shirt", "jigsaw puzzle", "jinrikisha, ricksha, rickshaw", "joystick", "kimono", "knee pad", "knot", "lab coat, laboratory coat", "ladle", "lampshade, lamp shade", "laptop, laptop computer", "lawn mower, mower", "lens cap, lens cover", "letter opener, paper knife, paperknife", "library", "lifeboat", "lighter, light, igniter, ignitor", "limousine, limo", "liner, ocean liner", "lipstick, lip rouge", "loafer", "lotion", "loudspeaker, speaker, speaker unit, loudspeaker system, speaker system", "loupe, jeweler's loupe", "lumbermill, sawmill", "magnetic compass", "mailbag, postbag", "mailbox, letter box", "maillot", "maillot, tank suit", "manhole cover", "maraca", "marimba, xylophone", "mask", "matchstick", "maypole", "maze, labyrinth", "measuring cup", "medicine chest, medicine cabinet", "megalith, megalithic structure", "microphone, mike", "microwave, microwave oven", "military uniform", "milk can", "minibus", 
"miniskirt, mini", "minivan", "missile", "mitten", "mixing bowl", "mobile home, manufactured home", "model t", "modem", "monastery", "monitor", "moped", "mortar", "mortarboard", "mosque", "mosquito net", "motor scooter, scooter", "mountain bike, all-terrain bike, off-roader", "mountain tent", "mouse, computer mouse", "mousetrap", "moving van", "muzzle", "nail", "neck brace", "necklace", "nipple", "notebook, notebook computer", "obelisk", "oboe, hautboy, hautbois", "ocarina, sweet potato", "odometer, hodometer, mileometer, milometer", "oil filter", "organ, pipe organ", "oscilloscope, scope, cathode-ray oscilloscope, cro", "overskirt", "oxcart", "oxygen mask", "packet", "paddle, boat paddle", "paddlewheel, paddle wheel", "padlock", "paintbrush", "pajama, pyjama, pj's, jammies", "palace", "panpipe, pandean pipe, syrinx", "paper towel", "parachute, chute", "parallel bars, bars", "park bench", "parking meter", "passenger car, coach, carriage", "patio, terrace", "pay-phone, pay-station", "pedestal, plinth, footstall", "pencil box, pencil case", "pencil sharpener", "perfume, essence", "petri dish", "photocopier", "pick, plectrum, plectron", "pickelhaube", "picket fence, paling", "pickup, pickup truck", "pier", "piggy bank, penny bank", "pill bottle", "pillow", "ping-pong ball", "pinwheel", "pirate, pirate ship", "pitcher, ewer", "plane, carpenter's plane, woodworking plane", "planetarium", "plastic bag", "plate rack", "plow, plough", "plunger, plumber's helper", "polaroid camera, polaroid land camera", "pole", "police van, police wagon, paddy wagon, patrol wagon, wagon, black maria", "poncho", "pool table, billiard table, snooker table", "pop bottle, soda bottle", "pot, flowerpot", "potter's wheel", "power drill", "prayer rug, prayer mat", "printer", "prison, prison house", "projectile, missile", "projector", "puck, hockey puck", "punching bag, punch bag, punching ball, punchball", "purse", "quill, quill pen", "quilt, comforter, comfort, puff", "racer, race car, racing car", "racket, racquet", "radiator", "radio, wireless", "radio telescope, radio reflector", "rain barrel", "recreational vehicle, rv, r.v.", "reel", "reflex camera", "refrigerator, icebox", "remote control, remote", "restaurant, eating house, eating place, eatery", "revolver, six-gun, six-shooter", "rifle", "rocking chair, rocker", "rotisserie", "rubber eraser, rubber, pencil eraser", "rugby ball", "rule, ruler", "running shoe", "safe", "safety pin", "saltshaker, salt shaker", "sandal", "sarong", "sax, saxophone", "scabbard", "scale, weighing machine", "school bus", "schooner", "scoreboard", "screen, crt screen", "screw", "screwdriver", "seat belt, seatbelt", "sewing machine", "shield, buckler", "shoe shop, shoe-shop, shoe store", "shoji", "shopping basket", "shopping cart", "shovel", "shower cap", "shower curtain", "ski", "ski mask", "sleeping bag", "slide rule, slipstick", "sliding door", "slot, one-armed bandit", "snorkel", "snowmobile", "snowplow, snowplough", "soap dispenser", "soccer ball", "sock", "solar dish, solar collector, solar furnace", "sombrero", "soup bowl", "space bar", "space heater", "space shuttle", "spatula", "speedboat", "spider web, spider's web", "spindle", "sports car, sport car", "spotlight, spot", "stage", "steam locomotive", "steel arch bridge", "steel drum", "stethoscope", "stole", "stone wall", "stopwatch, stop watch", "stove", "strainer", "streetcar, tram, tramcar, trolley, trolley car", "stretcher", "studio couch, day bed", "stupa, tope", "submarine, pigboat, sub, u-boat", "suit, suit of clothes", 
"sundial", "sunglass", "sunglasses, dark glasses, shades", "sunscreen, sunblock, sun blocker", "suspension bridge", "swab, swob, mop", "sweatshirt", "swimming trunks, bathing trunks", "swing", "switch, electric switch, electrical switch", "syringe", "table lamp", "tank, army tank, armored combat vehicle, armoured combat vehicle", "tape player", "teapot", "teddy, teddy bear", "television, television system", "tennis ball", "thatch, thatched roof", "theater curtain, theatre curtain", "thimble", "thresher, thrasher, threshing machine", "throne", "tile roof", "toaster", "tobacco shop, tobacconist shop, tobacconist", "toilet seat", "torch", "totem pole", "tow truck, tow car, wrecker", "toyshop", "tractor", "trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi", "tray", "trench coat", "tricycle, trike, velocipede", "trimaran", "tripod", "triumphal arch", "trolleybus, trolley coach, trackless trolley", "trombone", "tub, vat", "turnstile", "typewriter keyboard", "umbrella", "unicycle, monocycle", "upright, upright piano", "vacuum, vacuum cleaner", "vase", "vault", "velvet", "vending machine", "vestment", "viaduct", "violin, fiddle", "volleyball", "waffle iron", "wall clock", "wallet, billfold, notecase, pocketbook", "wardrobe, closet, press", "warplane, military plane", "washbasin, handbasin, washbowl, lavabo, wash-hand basin", "washer, automatic washer, washing machine", "water bottle", "water jug", "water tower", "whiskey jug", "whistle", "wig", "window screen", "window shade", "windsor tie", "wine bottle", "wing", "wok", "wooden spoon", "wool, woolen, woollen", "worm fence, snake fence, snake-rail fence, virginia fence", "wreck", "yawl", "yurt", "web site, website, internet site, site", "comic book", "crossword puzzle, crossword", "street sign", "traffic light, traffic signal, stoplight", "book jacket, dust cover, dust jacket, dust wrapper", "menu", "plate", "guacamole", "consomme", "hot pot, hotpot", "trifle", "ice cream, icecream", "ice lolly, lolly, lollipop, popsicle", "french loaf", "bagel, beigel", "pretzel", "cheeseburger", "hotdog, hot dog, red hot", "mashed potato", "head cabbage", "broccoli", "cauliflower", "zucchini, courgette", "spaghetti squash", "acorn squash", "butternut squash", "cucumber, cuke", "artichoke, globe artichoke", "bell pepper", "cardoon", "mushroom", "granny smith", "strawberry", "orange", "lemon", "fig", "pineapple, ananas", "banana", "jackfruit, jak, jack", "custard apple", "pomegranate", "hay", "carbonara", "chocolate sauce, chocolate syrup", "dough", "meat loaf, meatloaf", "pizza, pizza pie", "potpie", "burrito", "red wine", "espresso", "cup", "eggnog", "alp", "bubble", "cliff, drop, drop-off", "coral reef", "geyser", "lakeside, lakeshore", "promontory, headland, head, foreland", "sandbar, sand bar", "seashore, coast, seacoast, sea-coast", "valley, vale", "volcano", "ballplayer, baseball player", "groom, bridegroom", "scuba diver", "rapeseed", "daisy", "yellow lady's slipper, yellow lady-slipper, cypripedium calceolus, cypripedium parviflorum", "corn", "acorn", "hip, rose hip, rosehip", "buckeye, horse chestnut, conker", "coral fungus", "agaric", "gyromitra", "stinkhorn, carrion fungus", "earthstar", "hen-of-the-woods, hen of the woods, polyporus frondosus, grifola frondosa", "bolete", "ear, spike, capitulum", "toilet tissue, toilet paper, bathroom tissue" ]
facebook/dinov2-giant-imagenet1k-1-layer
# Vision Transformer (giant-sized model) trained using DINOv2

Vision Transformer (ViT) model trained using the DINOv2 method. It was introduced in the paper [DINOv2: Learning Robust Visual Features without Supervision](https://arxiv.org/abs/2304.07193) by Oquab et al. and first released in [this repository](https://github.com/facebookresearch/dinov2).

Disclaimer: The team releasing DINOv2 did not write a model card for this model, so this model card has been written by the Hugging Face team.

## Model description

The Vision Transformer (ViT) is a transformer encoder model (BERT-like) pretrained on a large collection of images in a self-supervised fashion. Images are presented to the model as a sequence of fixed-size patches, which are linearly embedded. A [CLS] token is added to the beginning of the sequence for use in classification tasks, and absolute position embeddings are added before the sequence is fed to the layers of the Transformer encoder.

Note that the backbone was pretrained without labels; this checkpoint adds a single linear classification layer (the "1-layer" in its name) trained on ImageNet-1k on top of the frozen features.

Through pre-training, the model learns an inner representation of images that can then be used to extract features for downstream tasks: given a dataset of labeled images, for instance, you can train a standard classifier by placing a linear layer on top of the pretrained encoder. One typically places the linear layer on top of the [CLS] token, as the last hidden state of this token can be seen as a representation of the entire image.

## Intended uses & limitations

You can use the model to classify an image into one of the [1000 ImageNet labels](https://huggingface.co/datasets/huggingface/label-files/blob/main/imagenet-1k-id2label.json). See the [model hub](https://huggingface.co/models?search=facebook/dinov2) to look for other fine-tuned versions on a task that interests you.

### How to use

Here is how to use this model (a higher-level `pipeline` variant is sketched after this card):

```python
from transformers import AutoImageProcessor, AutoModelForImageClassification
from PIL import Image
import requests

url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained('facebook/dinov2-giant-imagenet1k-1-layer')
model = AutoModelForImageClassification.from_pretrained('facebook/dinov2-giant-imagenet1k-1-layer')

inputs = processor(images=image, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits

predicted_class_idx = logits.argmax(-1).item()
print("Predicted class:", model.config.id2label[predicted_class_idx])
```

### BibTeX entry and citation info

```bibtex
@misc{oquab2023dinov2,
      title={DINOv2: Learning Robust Visual Features without Supervision},
      author={Maxime Oquab and Timothée Darcet and Théo Moutakanni and Huy Vo and Marc Szafraniec and Vasil Khalidov and Pierre Fernandez and Daniel Haziza and Francisco Massa and Alaaeldin El-Nouby and Mahmoud Assran and Nicolas Ballas and Wojciech Galuba and Russell Howes and Po-Yao Huang and Shang-Wen Li and Ishan Misra and Michael Rabbat and Vasu Sharma and Gabriel Synnaeve and Hu Xu and Hervé Jegou and Julien Mairal and Patrick Labatut and Armand Joulin and Piotr Bojanowski},
      year={2023},
      eprint={2304.07193},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
```
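For quick experiments, the same classification can also be run through the high-level `pipeline` API in `transformers`. A short sketch follows; the pipeline call and its `top_k` argument are standard library features, shown here as a convenience rather than as part of the original card.

```python
# Convenience sketch: top-5 ImageNet predictions via the image-classification pipeline.
from transformers import pipeline

classifier = pipeline("image-classification", model="facebook/dinov2-giant-imagenet1k-1-layer")

url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
for pred in classifier(url, top_k=5):  # the pipeline accepts URLs, file paths, or PIL images
    print(f"{pred['label']}: {pred['score']:.4f}")
```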
[ "tench, tinca tinca", "goldfish, carassius auratus", "great white shark, white shark, man-eater, man-eating shark, carcharodon carcharias", "tiger shark, galeocerdo cuvieri", "hammerhead, hammerhead shark", "electric ray, crampfish, numbfish, torpedo", "stingray", "cock", "hen", "ostrich, struthio camelus", "brambling, fringilla montifringilla", "goldfinch, carduelis carduelis", "house finch, linnet, carpodacus mexicanus", "junco, snowbird", "indigo bunting, indigo finch, indigo bird, passerina cyanea", "robin, american robin, turdus migratorius", "bulbul", "jay", "magpie", "chickadee", "water ouzel, dipper", "kite", "bald eagle, american eagle, haliaeetus leucocephalus", "vulture", "great grey owl, great gray owl, strix nebulosa", "european fire salamander, salamandra salamandra", "common newt, triturus vulgaris", "eft", "spotted salamander, ambystoma maculatum", "axolotl, mud puppy, ambystoma mexicanum", "bullfrog, rana catesbeiana", "tree frog, tree-frog", "tailed frog, bell toad, ribbed toad, tailed toad, ascaphus trui", "loggerhead, loggerhead turtle, caretta caretta", "leatherback turtle, leatherback, leathery turtle, dermochelys coriacea", "mud turtle", "terrapin", "box turtle, box tortoise", "banded gecko", "common iguana, iguana, iguana iguana", "american chameleon, anole, anolis carolinensis", "whiptail, whiptail lizard", "agama", "frilled lizard, chlamydosaurus kingi", "alligator lizard", "gila monster, heloderma suspectum", "green lizard, lacerta viridis", "african chameleon, chamaeleo chamaeleon", "komodo dragon, komodo lizard, dragon lizard, giant lizard, varanus komodoensis", "african crocodile, nile crocodile, crocodylus niloticus", "american alligator, alligator mississipiensis", "triceratops", "thunder snake, worm snake, carphophis amoenus", "ringneck snake, ring-necked snake, ring snake", "hognose snake, puff adder, sand viper", "green snake, grass snake", "king snake, kingsnake", "garter snake, grass snake", "water snake", "vine snake", "night snake, hypsiglena torquata", "boa constrictor, constrictor constrictor", "rock python, rock snake, python sebae", "indian cobra, naja naja", "green mamba", "sea snake", "horned viper, cerastes, sand viper, horned asp, cerastes cornutus", "diamondback, diamondback rattlesnake, crotalus adamanteus", "sidewinder, horned rattlesnake, crotalus cerastes", "trilobite", "harvestman, daddy longlegs, phalangium opilio", "scorpion", "black and gold garden spider, argiope aurantia", "barn spider, araneus cavaticus", "garden spider, aranea diademata", "black widow, latrodectus mactans", "tarantula", "wolf spider, hunting spider", "tick", "centipede", "black grouse", "ptarmigan", "ruffed grouse, partridge, bonasa umbellus", "prairie chicken, prairie grouse, prairie fowl", "peacock", "quail", "partridge", "african grey, african gray, psittacus erithacus", "macaw", "sulphur-crested cockatoo, kakatoe galerita, cacatua galerita", "lorikeet", "coucal", "bee eater", "hornbill", "hummingbird", "jacamar", "toucan", "drake", "red-breasted merganser, mergus serrator", "goose", "black swan, cygnus atratus", "tusker", "echidna, spiny anteater, anteater", "platypus, duckbill, duckbilled platypus, duck-billed platypus, ornithorhynchus anatinus", "wallaby, brush kangaroo", "koala, koala bear, kangaroo bear, native bear, phascolarctos cinereus", "wombat", "jellyfish", "sea anemone, anemone", "brain coral", "flatworm, platyhelminth", "nematode, nematode worm, roundworm", "conch", "snail", "slug", "sea slug, nudibranch", "chiton, coat-of-mail shell, sea 
cradle, polyplacophore", "chambered nautilus, pearly nautilus, nautilus", "dungeness crab, cancer magister", "rock crab, cancer irroratus", "fiddler crab", "king crab, alaska crab, alaskan king crab, alaska king crab, paralithodes camtschatica", "american lobster, northern lobster, maine lobster, homarus americanus", "spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish", "crayfish, crawfish, crawdad, crawdaddy", "hermit crab", "isopod", "white stork, ciconia ciconia", "black stork, ciconia nigra", "spoonbill", "flamingo", "little blue heron, egretta caerulea", "american egret, great white heron, egretta albus", "bittern", "crane", "limpkin, aramus pictus", "european gallinule, porphyrio porphyrio", "american coot, marsh hen, mud hen, water hen, fulica americana", "bustard", "ruddy turnstone, arenaria interpres", "red-backed sandpiper, dunlin, erolia alpina", "redshank, tringa totanus", "dowitcher", "oystercatcher, oyster catcher", "pelican", "king penguin, aptenodytes patagonica", "albatross, mollymawk", "grey whale, gray whale, devilfish, eschrichtius gibbosus, eschrichtius robustus", "killer whale, killer, orca, grampus, sea wolf, orcinus orca", "dugong, dugong dugon", "sea lion", "chihuahua", "japanese spaniel", "maltese dog, maltese terrier, maltese", "pekinese, pekingese, peke", "shih-tzu", "blenheim spaniel", "papillon", "toy terrier", "rhodesian ridgeback", "afghan hound, afghan", "basset, basset hound", "beagle", "bloodhound, sleuthhound", "bluetick", "black-and-tan coonhound", "walker hound, walker foxhound", "english foxhound", "redbone", "borzoi, russian wolfhound", "irish wolfhound", "italian greyhound", "whippet", "ibizan hound, ibizan podenco", "norwegian elkhound, elkhound", "otterhound, otter hound", "saluki, gazelle hound", "scottish deerhound, deerhound", "weimaraner", "staffordshire bullterrier, staffordshire bull terrier", "american staffordshire terrier, staffordshire terrier, american pit bull terrier, pit bull terrier", "bedlington terrier", "border terrier", "kerry blue terrier", "irish terrier", "norfolk terrier", "norwich terrier", "yorkshire terrier", "wire-haired fox terrier", "lakeland terrier", "sealyham terrier, sealyham", "airedale, airedale terrier", "cairn, cairn terrier", "australian terrier", "dandie dinmont, dandie dinmont terrier", "boston bull, boston terrier", "miniature schnauzer", "giant schnauzer", "standard schnauzer", "scotch terrier, scottish terrier, scottie", "tibetan terrier, chrysanthemum dog", "silky terrier, sydney silky", "soft-coated wheaten terrier", "west highland white terrier", "lhasa, lhasa apso", "flat-coated retriever", "curly-coated retriever", "golden retriever", "labrador retriever", "chesapeake bay retriever", "german short-haired pointer", "vizsla, hungarian pointer", "english setter", "irish setter, red setter", "gordon setter", "brittany spaniel", "clumber, clumber spaniel", "english springer, english springer spaniel", "welsh springer spaniel", "cocker spaniel, english cocker spaniel, cocker", "sussex spaniel", "irish water spaniel", "kuvasz", "schipperke", "groenendael", "malinois", "briard", "kelpie", "komondor", "old english sheepdog, bobtail", "shetland sheepdog, shetland sheep dog, shetland", "collie", "border collie", "bouvier des flandres, bouviers des flandres", "rottweiler", "german shepherd, german shepherd dog, german police dog, alsatian", "doberman, doberman pinscher", "miniature pinscher", "greater swiss mountain dog", "bernese mountain dog", "appenzeller", "entlebucher", "boxer", "bull 
mastiff", "tibetan mastiff", "french bulldog", "great dane", "saint bernard, st bernard", "eskimo dog, husky", "malamute, malemute, alaskan malamute", "siberian husky", "dalmatian, coach dog, carriage dog", "affenpinscher, monkey pinscher, monkey dog", "basenji", "pug, pug-dog", "leonberg", "newfoundland, newfoundland dog", "great pyrenees", "samoyed, samoyede", "pomeranian", "chow, chow chow", "keeshond", "brabancon griffon", "pembroke, pembroke welsh corgi", "cardigan, cardigan welsh corgi", "toy poodle", "miniature poodle", "standard poodle", "mexican hairless", "timber wolf, grey wolf, gray wolf, canis lupus", "white wolf, arctic wolf, canis lupus tundrarum", "red wolf, maned wolf, canis rufus, canis niger", "coyote, prairie wolf, brush wolf, canis latrans", "dingo, warrigal, warragal, canis dingo", "dhole, cuon alpinus", "african hunting dog, hyena dog, cape hunting dog, lycaon pictus", "hyena, hyaena", "red fox, vulpes vulpes", "kit fox, vulpes macrotis", "arctic fox, white fox, alopex lagopus", "grey fox, gray fox, urocyon cinereoargenteus", "tabby, tabby cat", "tiger cat", "persian cat", "siamese cat, siamese", "egyptian cat", "cougar, puma, catamount, mountain lion, painter, panther, felis concolor", "lynx, catamount", "leopard, panthera pardus", "snow leopard, ounce, panthera uncia", "jaguar, panther, panthera onca, felis onca", "lion, king of beasts, panthera leo", "tiger, panthera tigris", "cheetah, chetah, acinonyx jubatus", "brown bear, bruin, ursus arctos", "american black bear, black bear, ursus americanus, euarctos americanus", "ice bear, polar bear, ursus maritimus, thalarctos maritimus", "sloth bear, melursus ursinus, ursus ursinus", "mongoose", "meerkat, mierkat", "tiger beetle", "ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle", "ground beetle, carabid beetle", "long-horned beetle, longicorn, longicorn beetle", "leaf beetle, chrysomelid", "dung beetle", "rhinoceros beetle", "weevil", "fly", "bee", "ant, emmet, pismire", "grasshopper, hopper", "cricket", "walking stick, walkingstick, stick insect", "cockroach, roach", "mantis, mantid", "cicada, cicala", "leafhopper", "lacewing, lacewing fly", "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk", "damselfly", "admiral", "ringlet, ringlet butterfly", "monarch, monarch butterfly, milkweed butterfly, danaus plexippus", "cabbage butterfly", "sulphur butterfly, sulfur butterfly", "lycaenid, lycaenid butterfly", "starfish, sea star", "sea urchin", "sea cucumber, holothurian", "wood rabbit, cottontail, cottontail rabbit", "hare", "angora, angora rabbit", "hamster", "porcupine, hedgehog", "fox squirrel, eastern fox squirrel, sciurus niger", "marmot", "beaver", "guinea pig, cavia cobaya", "sorrel", "zebra", "hog, pig, grunter, squealer, sus scrofa", "wild boar, boar, sus scrofa", "warthog", "hippopotamus, hippo, river horse, hippopotamus amphibius", "ox", "water buffalo, water ox, asiatic buffalo, bubalus bubalis", "bison", "ram, tup", "bighorn, bighorn sheep, cimarron, rocky mountain bighorn, rocky mountain sheep, ovis canadensis", "ibex, capra ibex", "hartebeest", "impala, aepyceros melampus", "gazelle", "arabian camel, dromedary, camelus dromedarius", "llama", "weasel", "mink", "polecat, fitch, foulmart, foumart, mustela putorius", "black-footed ferret, ferret, mustela nigripes", "otter", "skunk, polecat, wood pussy", "badger", "armadillo", "three-toed sloth, ai, bradypus tridactylus", "orangutan, orang, orangutang, pongo pygmaeus", "gorilla, 
gorilla gorilla", "chimpanzee, chimp, pan troglodytes", "gibbon, hylobates lar", "siamang, hylobates syndactylus, symphalangus syndactylus", "guenon, guenon monkey", "patas, hussar monkey, erythrocebus patas", "baboon", "macaque", "langur", "colobus, colobus monkey", "proboscis monkey, nasalis larvatus", "marmoset", "capuchin, ringtail, cebus capucinus", "howler monkey, howler", "titi, titi monkey", "spider monkey, ateles geoffroyi", "squirrel monkey, saimiri sciureus", "madagascar cat, ring-tailed lemur, lemur catta", "indri, indris, indri indri, indri brevicaudatus", "indian elephant, elephas maximus", "african elephant, loxodonta africana", "lesser panda, red panda, panda, bear cat, cat bear, ailurus fulgens", "giant panda, panda, panda bear, coon bear, ailuropoda melanoleuca", "barracouta, snoek", "eel", "coho, cohoe, coho salmon, blue jack, silver salmon, oncorhynchus kisutch", "rock beauty, holocanthus tricolor", "anemone fish", "sturgeon", "gar, garfish, garpike, billfish, lepisosteus osseus", "lionfish", "puffer, pufferfish, blowfish, globefish", "abacus", "abaya", "academic gown, academic robe, judge's robe", "accordion, piano accordion, squeeze box", "acoustic guitar", "aircraft carrier, carrier, flattop, attack aircraft carrier", "airliner", "airship, dirigible", "altar", "ambulance", "amphibian, amphibious vehicle", "analog clock", "apiary, bee house", "apron", "ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin", "assault rifle, assault gun", "backpack, back pack, knapsack, packsack, rucksack, haversack", "bakery, bakeshop, bakehouse", "balance beam, beam", "balloon", "ballpoint, ballpoint pen, ballpen, biro", "band aid", "banjo", "bannister, banister, balustrade, balusters, handrail", "barbell", "barber chair", "barbershop", "barn", "barometer", "barrel, cask", "barrow, garden cart, lawn cart, wheelbarrow", "baseball", "basketball", "bassinet", "bassoon", "bathing cap, swimming cap", "bath towel", "bathtub, bathing tub, bath, tub", "beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon", "beacon, lighthouse, beacon light, pharos", "beaker", "bearskin, busby, shako", "beer bottle", "beer glass", "bell cote, bell cot", "bib", "bicycle-built-for-two, tandem bicycle, tandem", "bikini, two-piece", "binder, ring-binder", "binoculars, field glasses, opera glasses", "birdhouse", "boathouse", "bobsled, bobsleigh, bob", "bolo tie, bolo, bola tie, bola", "bonnet, poke bonnet", "bookcase", "bookshop, bookstore, bookstall", "bottlecap", "bow", "bow tie, bow-tie, bowtie", "brass, memorial tablet, plaque", "brassiere, bra, bandeau", "breakwater, groin, groyne, mole, bulwark, seawall, jetty", "breastplate, aegis, egis", "broom", "bucket, pail", "buckle", "bulletproof vest", "bullet train, bullet", "butcher shop, meat market", "cab, hack, taxi, taxicab", "caldron, cauldron", "candle, taper, wax light", "cannon", "canoe", "can opener, tin opener", "cardigan", "car mirror", "carousel, carrousel, merry-go-round, roundabout, whirligig", "carpenter's kit, tool kit", "carton", "car wheel", "cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, atm", "cassette", "cassette player", "castle", "catamaran", "cd player", "cello, violoncello", "cellular telephone, cellular phone, cellphone, cell, mobile phone", "chain", "chainlink fence", "chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour", "chain saw, chainsaw", "chest", "chiffonier, 
commode", "chime, bell, gong", "china cabinet, china closet", "christmas stocking", "church, church building", "cinema, movie theater, movie theatre, movie house, picture palace", "cleaver, meat cleaver, chopper", "cliff dwelling", "cloak", "clog, geta, patten, sabot", "cocktail shaker", "coffee mug", "coffeepot", "coil, spiral, volute, whorl, helix", "combination lock", "computer keyboard, keypad", "confectionery, confectionary, candy store", "container ship, containership, container vessel", "convertible", "corkscrew, bottle screw", "cornet, horn, trumpet, trump", "cowboy boot", "cowboy hat, ten-gallon hat", "cradle", "crane", "crash helmet", "crate", "crib, cot", "crock pot", "croquet ball", "crutch", "cuirass", "dam, dike, dyke", "desk", "desktop computer", "dial telephone, dial phone", "diaper, nappy, napkin", "digital clock", "digital watch", "dining table, board", "dishrag, dishcloth", "dishwasher, dish washer, dishwashing machine", "disk brake, disc brake", "dock, dockage, docking facility", "dogsled, dog sled, dog sleigh", "dome", "doormat, welcome mat", "drilling platform, offshore rig", "drum, membranophone, tympan", "drumstick", "dumbbell", "dutch oven", "electric fan, blower", "electric guitar", "electric locomotive", "entertainment center", "envelope", "espresso maker", "face powder", "feather boa, boa", "file, file cabinet, filing cabinet", "fireboat", "fire engine, fire truck", "fire screen, fireguard", "flagpole, flagstaff", "flute, transverse flute", "folding chair", "football helmet", "forklift", "fountain", "fountain pen", "four-poster", "freight car", "french horn, horn", "frying pan, frypan, skillet", "fur coat", "garbage truck, dustcart", "gasmask, respirator, gas helmet", "gas pump, gasoline pump, petrol pump, island dispenser", "goblet", "go-kart", "golf ball", "golfcart, golf cart", "gondola", "gong, tam-tam", "gown", "grand piano, grand", "greenhouse, nursery, glasshouse", "grille, radiator grille", "grocery store, grocery, food market, market", "guillotine", "hair slide", "hair spray", "half track", "hammer", "hamper", "hand blower, blow dryer, blow drier, hair dryer, hair drier", "hand-held computer, hand-held microcomputer", "handkerchief, hankie, hanky, hankey", "hard disc, hard disk, fixed disk", "harmonica, mouth organ, harp, mouth harp", "harp", "harvester, reaper", "hatchet", "holster", "home theater, home theatre", "honeycomb", "hook, claw", "hoopskirt, crinoline", "horizontal bar, high bar", "horse cart, horse-cart", "hourglass", "ipod", "iron, smoothing iron", "jack-o'-lantern", "jean, blue jean, denim", "jeep, landrover", "jersey, t-shirt, tee shirt", "jigsaw puzzle", "jinrikisha, ricksha, rickshaw", "joystick", "kimono", "knee pad", "knot", "lab coat, laboratory coat", "ladle", "lampshade, lamp shade", "laptop, laptop computer", "lawn mower, mower", "lens cap, lens cover", "letter opener, paper knife, paperknife", "library", "lifeboat", "lighter, light, igniter, ignitor", "limousine, limo", "liner, ocean liner", "lipstick, lip rouge", "loafer", "lotion", "loudspeaker, speaker, speaker unit, loudspeaker system, speaker system", "loupe, jeweler's loupe", "lumbermill, sawmill", "magnetic compass", "mailbag, postbag", "mailbox, letter box", "maillot", "maillot, tank suit", "manhole cover", "maraca", "marimba, xylophone", "mask", "matchstick", "maypole", "maze, labyrinth", "measuring cup", "medicine chest, medicine cabinet", "megalith, megalithic structure", "microphone, mike", "microwave, microwave oven", "military uniform", "milk can", "minibus", 
"miniskirt, mini", "minivan", "missile", "mitten", "mixing bowl", "mobile home, manufactured home", "model t", "modem", "monastery", "monitor", "moped", "mortar", "mortarboard", "mosque", "mosquito net", "motor scooter, scooter", "mountain bike, all-terrain bike, off-roader", "mountain tent", "mouse, computer mouse", "mousetrap", "moving van", "muzzle", "nail", "neck brace", "necklace", "nipple", "notebook, notebook computer", "obelisk", "oboe, hautboy, hautbois", "ocarina, sweet potato", "odometer, hodometer, mileometer, milometer", "oil filter", "organ, pipe organ", "oscilloscope, scope, cathode-ray oscilloscope, cro", "overskirt", "oxcart", "oxygen mask", "packet", "paddle, boat paddle", "paddlewheel, paddle wheel", "padlock", "paintbrush", "pajama, pyjama, pj's, jammies", "palace", "panpipe, pandean pipe, syrinx", "paper towel", "parachute, chute", "parallel bars, bars", "park bench", "parking meter", "passenger car, coach, carriage", "patio, terrace", "pay-phone, pay-station", "pedestal, plinth, footstall", "pencil box, pencil case", "pencil sharpener", "perfume, essence", "petri dish", "photocopier", "pick, plectrum, plectron", "pickelhaube", "picket fence, paling", "pickup, pickup truck", "pier", "piggy bank, penny bank", "pill bottle", "pillow", "ping-pong ball", "pinwheel", "pirate, pirate ship", "pitcher, ewer", "plane, carpenter's plane, woodworking plane", "planetarium", "plastic bag", "plate rack", "plow, plough", "plunger, plumber's helper", "polaroid camera, polaroid land camera", "pole", "police van, police wagon, paddy wagon, patrol wagon, wagon, black maria", "poncho", "pool table, billiard table, snooker table", "pop bottle, soda bottle", "pot, flowerpot", "potter's wheel", "power drill", "prayer rug, prayer mat", "printer", "prison, prison house", "projectile, missile", "projector", "puck, hockey puck", "punching bag, punch bag, punching ball, punchball", "purse", "quill, quill pen", "quilt, comforter, comfort, puff", "racer, race car, racing car", "racket, racquet", "radiator", "radio, wireless", "radio telescope, radio reflector", "rain barrel", "recreational vehicle, rv, r.v.", "reel", "reflex camera", "refrigerator, icebox", "remote control, remote", "restaurant, eating house, eating place, eatery", "revolver, six-gun, six-shooter", "rifle", "rocking chair, rocker", "rotisserie", "rubber eraser, rubber, pencil eraser", "rugby ball", "rule, ruler", "running shoe", "safe", "safety pin", "saltshaker, salt shaker", "sandal", "sarong", "sax, saxophone", "scabbard", "scale, weighing machine", "school bus", "schooner", "scoreboard", "screen, crt screen", "screw", "screwdriver", "seat belt, seatbelt", "sewing machine", "shield, buckler", "shoe shop, shoe-shop, shoe store", "shoji", "shopping basket", "shopping cart", "shovel", "shower cap", "shower curtain", "ski", "ski mask", "sleeping bag", "slide rule, slipstick", "sliding door", "slot, one-armed bandit", "snorkel", "snowmobile", "snowplow, snowplough", "soap dispenser", "soccer ball", "sock", "solar dish, solar collector, solar furnace", "sombrero", "soup bowl", "space bar", "space heater", "space shuttle", "spatula", "speedboat", "spider web, spider's web", "spindle", "sports car, sport car", "spotlight, spot", "stage", "steam locomotive", "steel arch bridge", "steel drum", "stethoscope", "stole", "stone wall", "stopwatch, stop watch", "stove", "strainer", "streetcar, tram, tramcar, trolley, trolley car", "stretcher", "studio couch, day bed", "stupa, tope", "submarine, pigboat, sub, u-boat", "suit, suit of clothes", 
"sundial", "sunglass", "sunglasses, dark glasses, shades", "sunscreen, sunblock, sun blocker", "suspension bridge", "swab, swob, mop", "sweatshirt", "swimming trunks, bathing trunks", "swing", "switch, electric switch, electrical switch", "syringe", "table lamp", "tank, army tank, armored combat vehicle, armoured combat vehicle", "tape player", "teapot", "teddy, teddy bear", "television, television system", "tennis ball", "thatch, thatched roof", "theater curtain, theatre curtain", "thimble", "thresher, thrasher, threshing machine", "throne", "tile roof", "toaster", "tobacco shop, tobacconist shop, tobacconist", "toilet seat", "torch", "totem pole", "tow truck, tow car, wrecker", "toyshop", "tractor", "trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi", "tray", "trench coat", "tricycle, trike, velocipede", "trimaran", "tripod", "triumphal arch", "trolleybus, trolley coach, trackless trolley", "trombone", "tub, vat", "turnstile", "typewriter keyboard", "umbrella", "unicycle, monocycle", "upright, upright piano", "vacuum, vacuum cleaner", "vase", "vault", "velvet", "vending machine", "vestment", "viaduct", "violin, fiddle", "volleyball", "waffle iron", "wall clock", "wallet, billfold, notecase, pocketbook", "wardrobe, closet, press", "warplane, military plane", "washbasin, handbasin, washbowl, lavabo, wash-hand basin", "washer, automatic washer, washing machine", "water bottle", "water jug", "water tower", "whiskey jug", "whistle", "wig", "window screen", "window shade", "windsor tie", "wine bottle", "wing", "wok", "wooden spoon", "wool, woolen, woollen", "worm fence, snake fence, snake-rail fence, virginia fence", "wreck", "yawl", "yurt", "web site, website, internet site, site", "comic book", "crossword puzzle, crossword", "street sign", "traffic light, traffic signal, stoplight", "book jacket, dust cover, dust jacket, dust wrapper", "menu", "plate", "guacamole", "consomme", "hot pot, hotpot", "trifle", "ice cream, icecream", "ice lolly, lolly, lollipop, popsicle", "french loaf", "bagel, beigel", "pretzel", "cheeseburger", "hotdog, hot dog, red hot", "mashed potato", "head cabbage", "broccoli", "cauliflower", "zucchini, courgette", "spaghetti squash", "acorn squash", "butternut squash", "cucumber, cuke", "artichoke, globe artichoke", "bell pepper", "cardoon", "mushroom", "granny smith", "strawberry", "orange", "lemon", "fig", "pineapple, ananas", "banana", "jackfruit, jak, jack", "custard apple", "pomegranate", "hay", "carbonara", "chocolate sauce, chocolate syrup", "dough", "meat loaf, meatloaf", "pizza, pizza pie", "potpie", "burrito", "red wine", "espresso", "cup", "eggnog", "alp", "bubble", "cliff, drop, drop-off", "coral reef", "geyser", "lakeside, lakeshore", "promontory, headland, head, foreland", "sandbar, sand bar", "seashore, coast, seacoast, sea-coast", "valley, vale", "volcano", "ballplayer, baseball player", "groom, bridegroom", "scuba diver", "rapeseed", "daisy", "yellow lady's slipper, yellow lady-slipper, cypripedium calceolus, cypripedium parviflorum", "corn", "acorn", "hip, rose hip, rosehip", "buckeye, horse chestnut, conker", "coral fungus", "agaric", "gyromitra", "stinkhorn, carrion fungus", "earthstar", "hen-of-the-woods, hen of the woods, polyporus frondosus, grifola frondosa", "bolete", "ear, spike, capitulum", "toilet tissue, toilet paper, bathroom tissue" ]
ahirtonlopes/swin-tiny-patch4-window7-224-finetuned-cifar10
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# swin-tiny-patch4-window7-224-finetuned-cifar10

This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the cifar10 dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1144
- Accuracy: 0.962

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 128
- eval_batch_size: 128
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 512
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.601         | 1.0   | 88   | 0.1927          | 0.9434   |
| 0.4581        | 2.0   | 176  | 0.1288          | 0.9576   |
| 0.4117        | 3.0   | 264  | 0.1144          | 0.962    |

### Framework versions

- Transformers 4.34.0
- Pytorch 2.0.1+cu118
- Datasets 2.14.5
- Tokenizers 0.14.0
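A minimal inference sketch, assuming the checkpoint is available under the `ahirtonlopes/swin-tiny-patch4-window7-224-finetuned-cifar10` repo id (the image path is a placeholder):

```python
from transformers import pipeline

# Load the fine-tuned Swin checkpoint through the image-classification pipeline.
classifier = pipeline(
    "image-classification",
    model="ahirtonlopes/swin-tiny-patch4-window7-224-finetuned-cifar10",
)

# Classify a local image; "example.png" is a placeholder path.
predictions = classifier("example.png")
print(predictions)  # list of {"label": ..., "score": ...} dicts, highest score first
```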
[ "airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck" ]
DazMashaly/VIT_large_ieee
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# VIT_large_ieee

This model is a fine-tuned version of [google/vit-large-patch16-224-in21k](https://huggingface.co/google/vit-large-patch16-224-in21k) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0230
- Accuracy: 0.9941

## Model description

This model was used for the IEEE ManSB VICTORIS 2.0 Final Competition.

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 1e-06
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.0047        | 0.67  | 100  | 0.0283          | 0.9929   |
| 0.0165        | 1.34  | 200  | 0.0230          | 0.9941   |

### Framework versions

- Transformers 4.33.1
- Pytorch 2.0.1+cu118
- Datasets 2.14.5
- Tokenizers 0.13.3
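A minimal inference sketch with the lower-level API, assuming the checkpoint is available under the `DazMashaly/VIT_large_ieee` repo id ("sample.jpg" is a placeholder path):

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

repo_id = "DazMashaly/VIT_large_ieee"
processor = AutoImageProcessor.from_pretrained(repo_id)
model = AutoModelForImageClassification.from_pretrained(repo_id)

# Preprocess a single image and run a forward pass without gradients.
image = Image.open("sample.jpg")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Map the highest-scoring class index back to its label string.
print(model.config.id2label[logits.argmax(-1).item()])
```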
[ "ascariasis", "babesia", "opisthorchis viverrine", "paragonimus spp", "t. rubrum", "taenia spp", "trichuris trichiura", "capillaria p", "enterobius v", "epidermophyton floccosum", "fasciolopsis buski", "hookworm egg", "hymenolepis diminuta", "hymenolepis nana", "leishmania" ]
AhmedBedair/vit-base-beans-demo-v5
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# vit-base-beans-demo-v5

This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the bact dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0612
- Accuracy: 0.9874

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.0007        | 0.17  | 100  | 0.1211          | 0.9748   |
| 0.0005        | 0.34  | 200  | 0.1027          | 0.9786   |
| 0.0195        | 0.5   | 300  | 0.0869          | 0.9836   |
| 0.0025        | 0.67  | 400  | 0.0823          | 0.9845   |
| 0.0154        | 0.84  | 500  | 0.0888          | 0.9828   |
| 0.0004        | 1.01  | 600  | 0.0781          | 0.9853   |
| 0.0004        | 1.17  | 700  | 0.0931          | 0.9832   |
| 0.0004        | 1.34  | 800  | 0.0995          | 0.9811   |
| 0.0004        | 1.51  | 900  | 0.0925          | 0.9832   |
| 0.0003        | 1.68  | 1000 | 0.0857          | 0.9836   |
| 0.0364        | 1.85  | 1100 | 0.0788          | 0.9845   |
| 0.0003        | 2.01  | 1200 | 0.0775          | 0.9840   |
| 0.0003        | 2.18  | 1300 | 0.0718          | 0.9857   |
| 0.0003        | 2.35  | 1400 | 0.0804          | 0.9849   |
| 0.0003        | 2.52  | 1500 | 0.0751          | 0.9836   |
| 0.0003        | 2.68  | 1600 | 0.0659          | 0.9870   |
| 0.0002        | 2.85  | 1700 | 0.0612          | 0.9874   |

### Framework versions

- Transformers 4.33.1
- Pytorch 2.0.1+cu118
- Datasets 2.14.5
- Tokenizers 0.13.3
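A sketch of `TrainingArguments` mirroring the hyperparameters listed above (the output directory and evaluation cadence are assumptions; dataset loading, the model, and the `Trainer` setup are omitted):

```python
from transformers import TrainingArguments

# Mirrors the hyperparameter list above; the Adam betas/epsilon shown there are the defaults.
args = TrainingArguments(
    output_dir="vit-base-beans-demo-v5",  # placeholder
    learning_rate=5e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=8,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=3,
    evaluation_strategy="steps",
    eval_steps=100,  # matches the 100-step cadence of the results table
)
```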
[ "ascariasis", "babesia", "opisthorchis viverrine", "paragonimus spp", "t. rubrum", "taenia spp", "trichuris trichiura", "capillaria p", "enterobius v", "epidermophyton floccosum", "fasciolopsis buski", "hookworm egg", "hymenolepis diminuta", "hymenolepis nana", "leishmania" ]
Mangara01/emotion_classification
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# emotion_classification

This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset.
It achieves the following results on the evaluation set:
- Loss: 1.9455
- Accuracy: 0.3

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| No log        | 1.0   | 10   | 2.0636          | 0.1187   |
| No log        | 2.0   | 20   | 2.0568          | 0.1437   |
| No log        | 3.0   | 30   | 2.0321          | 0.1812   |
| No log        | 4.0   | 40   | 2.0247          | 0.2      |
| No log        | 5.0   | 50   | 1.9975          | 0.3125   |
| No log        | 6.0   | 60   | 1.9793          | 0.2875   |
| No log        | 7.0   | 70   | 1.9746          | 0.275    |
| No log        | 8.0   | 80   | 1.9530          | 0.3063   |
| No log        | 9.0   | 90   | 1.9487          | 0.3438   |
| No log        | 10.0  | 100  | 1.9513          | 0.2812   |

### Framework versions

- Transformers 4.28.0
- Pytorch 2.0.1+cu118
- Datasets 2.14.5
- Tokenizers 0.13.3
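The accuracy column above is typically produced by a `compute_metrics` callback passed to `Trainer`; a minimal sketch, assuming the `evaluate` library:

```python
import numpy as np
import evaluate

accuracy = evaluate.load("accuracy")

def compute_metrics(eval_pred):
    # Trainer hands over (logits, labels); take the argmax class per example.
    logits, labels = eval_pred
    predictions = np.argmax(logits, axis=-1)
    return accuracy.compute(predictions=predictions, references=labels)
```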
[ "anger", "contempt", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]
verypro/vit-base-patch16-224-cifar10
# Model Card for ViT Fine-Tuned on CIFAR-10

<!-- Provide a quick summary of what the model is/does. -->

It's a toy experiment of fine-tuning ViT using Hugging Face Transformers.

## Model Details

The model was fine-tuned on CIFAR-10 for 1000 steps and achieved an accuracy of 98.7% on the test split.

### Model Description

<!-- Provide a longer summary of what this model is. -->

- **Developed by:** verypro
- **Model type:** Vision Transformer
- **License:** MIT
- **Finetuned from model:** google/vit-base-patch16-224

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

```python
from transformers import ViTImageProcessor, ViTForImageClassification
from torchvision import datasets

# Initialize the model and feature extractor
image_processor = ViTImageProcessor.from_pretrained('verypro/vit-base-patch16-224-cifar10')
model = ViTForImageClassification.from_pretrained('verypro/vit-base-patch16-224-cifar10')

# Load the CIFAR10 dataset
test_dataset = datasets.CIFAR10(root='./data', train=False, download=True)

sample = test_dataset[0]
image = sample[0]
gt_label = sample[1]

# Save the original image, and print its label
image.save("original.png")
print(f"Ground truth class: '{test_dataset.classes[gt_label]}'")

inputs = image_processor(image, return_tensors="pt")
outputs = model(**inputs)
logits = outputs.logits
print(logits)

predicted_class_idx = logits.argmax(-1).item()
predicted_class_label = test_dataset.classes[predicted_class_idx]
print(f"Predicted class: '{predicted_class_label}', confidence: {logits[0, predicted_class_idx]:.2f}")
```

The output of the above code snippet should look like:

```bash
Ground truth class: 'cat'
tensor([[-1.1497, -0.1080, -0.7349,  9.2517, -1.3094,  0.5403, -0.9521, -1.0223, -1.4102, -1.5389]], grad_fn=<AddmmBackward0>)
Predicted class: 'cat', confidence: 9.25
```
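Note that the "confidence" printed above is a raw logit, not a probability. A small follow-up sketch (reusing the `logits`, `predicted_class_idx`, and `predicted_class_label` variables from the snippet above) converts it into a softmax probability:

```python
import torch

# Turn the raw logits into a probability distribution over the 10 classes.
probs = torch.softmax(logits, dim=-1)
print(f"Probability of '{predicted_class_label}': {probs[0, predicted_class_idx]:.4f}")
```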
[ "airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck" ]
elenaThevalley/mobilenet_v2_1.0_224-finetuned-32bs-0.01lr
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# mobilenet_v2_1.0_224-finetuned-32bs-0.01lr

This model is a fine-tuned version of [google/mobilenet_v2_1.0_224](https://huggingface.co/google/mobilenet_v2_1.0_224) on the imagefolder dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2230
- Accuracy: 0.9207

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.01
- train_batch_size: 32
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 16
- total_train_batch_size: 512
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| No log        | 0.99  | 53   | 0.3603          | 0.8745   |
| No log        | 1.99  | 107  | 0.2323          | 0.9183   |
| No log        | 2.96  | 159  | 0.2230          | 0.9207   |

### Framework versions

- Transformers 4.33.2
- Pytorch 2.0.1+cu118
- Datasets 2.14.5
- Tokenizers 0.13.3
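The `total_train_batch_size` of 512 is the per-step batch size (32) times `gradient_accumulation_steps` (16). A self-contained schematic of that accumulation pattern, with a dummy linear model and random data standing in for MobileNet and the image batches:

```python
import torch
from torch import nn

# Dummy 5-class classifier standing in for the fine-tuned MobileNet head.
model = nn.Linear(8, 5)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
accumulation_steps = 16  # as in the hyperparameters above

optimizer.zero_grad()
for step in range(accumulation_steps):
    x = torch.randn(32, 8)          # stands in for a batch of 32 images
    y = torch.randint(0, 5, (32,))  # stands in for the 5 class labels
    loss = nn.functional.cross_entropy(model(x), y)
    (loss / accumulation_steps).backward()  # average gradients over the effective batch of 512
optimizer.step()  # one parameter update per 16 accumulated micro-batches
```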
[ "drink", "food", "inside", "menu", "outside" ]
anurag629/swin-tiny-patch4-window7-224-finetuned-eurosat
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# swin-tiny-patch4-window7-224-finetuned-eurosat

This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0133
- Accuracy: 1.0

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 128
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 15

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.8235        | 1.0   | 13   | 0.6034          | 0.9239   |
| 0.5091        | 2.0   | 26   | 0.1870          | 0.9728   |
| 0.273         | 3.0   | 39   | 0.0895          | 0.9946   |
| 0.1401        | 4.0   | 52   | 0.0543          | 0.9946   |
| 0.0936        | 5.0   | 65   | 0.0484          | 0.9891   |
| 0.091         | 6.0   | 78   | 0.0498          | 0.9891   |
| 0.0603        | 7.0   | 91   | 0.0133          | 1.0      |
| 0.0421        | 8.0   | 104  | 0.0196          | 0.9946   |
| 0.0557        | 9.0   | 117  | 0.0172          | 0.9946   |
| 0.0552        | 10.0  | 130  | 0.0103          | 1.0      |
| 0.045         | 11.0  | 143  | 0.0082          | 1.0      |
| 0.0355        | 12.0  | 156  | 0.0071          | 1.0      |
| 0.0491        | 13.0  | 169  | 0.0087          | 1.0      |
| 0.0384        | 14.0  | 182  | 0.0065          | 1.0      |
| 0.0324        | 15.0  | 195  | 0.0061          | 1.0      |

### Framework versions

- Transformers 4.28.0
- Pytorch 2.0.1+cu118
- Datasets 2.14.5
- Tokenizers 0.13.3
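With 13 steps per epoch over 15 epochs (195 steps in total, per the results table), the 0.1 warmup ratio corresponds to roughly 19 warmup steps (the Trainer may round up). A runnable sketch of that linear warmup-then-decay schedule; the single dummy parameter is only there to satisfy the optimizer:

```python
import torch
from torch.optim import AdamW
from transformers import get_linear_schedule_with_warmup

total_steps = 195                      # 13 steps/epoch x 15 epochs
warmup_steps = int(0.1 * total_steps)  # = 19

params = [torch.nn.Parameter(torch.zeros(1))]  # dummy parameter for illustration
optimizer = AdamW(params, lr=5e-5, betas=(0.9, 0.999), eps=1e-8)
scheduler = get_linear_schedule_with_warmup(optimizer, warmup_steps, total_steps)

# The learning rate ramps up linearly during warmup, then decays linearly to 0.
for _ in range(3):
    optimizer.step()
    scheduler.step()
    print(scheduler.get_last_lr())
```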
[ "alpinia galanga (rasna)", "amaranthus viridis (arive-dantu)", "artocarpus heterophyllus (jackfruit)", "azadirachta indica (neem)", "basella alba (basale)", "brassica juncea (indian mustard)", "carissa carandas (karanda)", "citrus limon (lemon)", "ficus auriculata (roxburgh fig)", "ficus religiosa (peepal tree)", "hibiscus rosa-sinensis", "jasminum (jasmine)", "mangifera indica (mango)", "mentha (mint)", "moringa oleifera (drumstick)", "muntingia calabura (jamaica cherry-gasagase)", "murraya koenigii (curry)", "nerium oleander (oleander)", "nyctanthes arbor-tristis (parijata)", "ocimum tenuiflorum (tulsi)", "piper betle (betel)", "plectranthus amboinicus (mexican mint)", "pongamia pinnata (indian beech)", "psidium guajava (guava)", "punica granatum (pomegranate)", "santalum album (sandalwood)", "syzygium cumini (jamun)", "syzygium jambos (rose apple)", "tabernaemontana divaricata (crape jasmine)", "trigonella foenum-graecum (fenugreek)" ]
bryandts/image_classification_face
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# image_classification_face

This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset.
It achieves the following results on the evaluation set:
- Loss: 1.1157
- Accuracy: 0.625

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 8e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| No log        | 1.0   | 40   | 1.6266          | 0.475    |
| No log        | 2.0   | 80   | 1.3303          | 0.5375   |
| No log        | 3.0   | 120  | 1.2399          | 0.525    |
| No log        | 4.0   | 160  | 1.1779          | 0.5563   |
| No log        | 5.0   | 200  | 1.1825          | 0.55     |
| No log        | 6.0   | 240  | 1.1564          | 0.5875   |
| No log        | 7.0   | 280  | 1.1258          | 0.6125   |
| No log        | 8.0   | 320  | 1.1154          | 0.625    |
| No log        | 9.0   | 360  | 1.1169          | 0.6062   |
| No log        | 10.0  | 400  | 1.1155          | 0.625    |

### Framework versions

- Transformers 4.33.1
- Pytorch 2.0.1+cu118
- Datasets 2.14.5
- Tokenizers 0.13.3
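The card trains on an `imagefolder` dataset; a sketch of how such a directory layout is loaded with the `datasets` library (the `faces/` directory and its per-class sub-folders are placeholders):

```python
from datasets import load_dataset

# Expects one sub-folder per class, e.g. faces/anger/*.jpg, faces/happy/*.jpg, ...
dataset = load_dataset("imagefolder", data_dir="faces")

# Class names are inferred from the folder names.
labels = dataset["train"].features["label"].names
print(labels)
```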
[ "anger", "contempt", "disgust", "fear", "happy", "neutral", "sad", "surprise" ]