Column schema:

| Column | Type | Range / values |
|:--|:--|:--|
| modelId | string | length 4-81 |
| tags | list | |
| pipeline_tag | string (categorical) | 17 classes |
| config | dict | |
| downloads | int64 | 0-59.7M |
| first_commit | timestamp[ns, tz=UTC] | |
| card | string | length 51-438k |
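The rows that follow are easiest to handle programmatically. Below is a minimal sketch of loading and filtering rows with this schema, assuming the dump was exported from a dataset readable with the Hugging Face `datasets` library; the repository id `username/models-metadata` is a placeholder, not the actual source of this table.

```python
from datasets import load_dataset

# Placeholder repository id: substitute the dataset this dump was exported from.
ds = load_dataset("username/models-metadata", split="train")

# Keep only automatic-speech-recognition checkpoints whose config was parsed.
asr = ds.filter(
    lambda row: row["pipeline_tag"] == "automatic-speech-recognition"
    and row["config"] is not None
)

# Print a few (modelId, downloads, model_type) triples as a quick sanity check.
for row in asr.select(range(min(3, len(asr)))):
    print(row["modelId"], row["downloads"], row["config"]["model_type"])
```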
Akashpb13/xlsr_hungarian_new
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "hu", "dataset:mozilla-foundation/common_voice_8_0", "transformers", "generated_from_trainer", "hf-asr-leaderboard", "model_for_talk", "mozilla-foundation/common_voice_8_0", "robust-speech-event", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: openrail language: - en --- import openai import gradio openai.api_key = "vinayak-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" messages = [{"role": "system", "content": "You are a financial expert who specializes in real estate investment and negotiation"}] def CustomChatGPT(user_input): messages.append({"role": "user", "content": user_input}) response = openai.ChatCompletion.create( model = "gpt-3.5-turbo", messages = messages ) ChatGPT_reply = response["choices"][0]["message"]["content"] messages.append({"role": "assistant", "content": ChatGPT_reply}) return ChatGPT_reply demo = gradio.Interface(fn=CustomChatGPT, inputs = "text", outputs = "text", title = "Real Estate Pro") demo.launch(share=True)
AlexKay/xlm-roberta-large-qa-multilingual-finedtuned-ru
[ "pytorch", "xlm-roberta", "question-answering", "en", "ru", "multilingual", "arxiv:1912.09723", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
question-answering
{ "architectures": [ "XLMRobertaForQuestionAnswering" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10,012
null
--- tags: - generated_from_trainer model-index: - name: ChatCare-3epoch-wandb results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # ChatCare-3epoch-wandb This model is a fine-tuned version of [ClueAI/ChatYuan-large-v2](https://huggingface.co/ClueAI/ChatYuan-large-v2) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.0 - Datasets 2.1.0 - Tokenizers 0.13.2
AlexMaclean/sentence-compression-roberta
[ "pytorch", "roberta", "token-classification", "transformers", "generated_from_trainer", "license:mit", "autotrain_compatible" ]
token-classification
{ "architectures": [ "RobertaForTokenClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- language: - zh license: mit tags: - 1.1.0 - generated_from_trainer datasets: - facebook/voxpopuli model-index: - name: SpeechT5 TTS Dutch neunit results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # SpeechT5 TTS Dutch neunit This model is a fine-tuned version of [microsoft/speecht5_tts](https://huggingface.co/microsoft/speecht5_tts) on the VoxPopuli dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 4000 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.29.0.dev0 - Pytorch 2.0.0+cu117 - Datasets 2.11.0 - Tokenizers 0.12.1
AlexN/xls-r-300m-fr-0
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "fr", "dataset:mozilla-foundation/common_voice_8_0", "transformers", "mozilla-foundation/common_voice_8_0", "generated_from_trainer", "robust-speech-event", "hf-asr-leaderboard", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: mit inference: False --- The TopicGPT model has been developed with the aim of detecting and extracting the primary subject matter of conversations. This capability can prove to be exceedingly beneficial in a wide range of natural language processing (NLP) applications. To utilize the TopicGPT model, one only needs to input the conversational text or data for analysis. Subsequently, the model processes the input and returns a reasonably precise estimate of the primary topic of the conversation. For your convenience, the TopicGPT model can be accessed through the following link: https://huggingface.co/spaces/Celestinian/Topic-Detection. **Additional Details** It is essential to note that the input provided to this model must commence with #CONTEXT# (BOS) and terminate with #TOPIC# (EOS). Here is an example: #CONTEXT# The quick brown fox jumps over the lazy dog! #TOPIC#
AlexN/xls-r-300m-pt
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "pt", "dataset:mozilla-foundation/common_voice_8_0", "transformers", "robust-speech-event", "mozilla-foundation/common_voice_8_0", "generated_from_trainer", "hf-asr-leaderboard", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
15
2023-04-25T15:59:52Z
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SoccerTwos library_name: ml-agents --- # **poca** Agent playing **SoccerTwos** This is a trained model of a **poca** agent playing **SoccerTwos** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SoccerTwos 2. Step 1: Write your model_id: Apocalypse-19/Soccer2v2 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
AlexaMerens/Owl
[ "license:cc" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: ratish/DBERT_ZS_Desc_MAKE_v1.4.1 results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # ratish/DBERT_ZS_Desc_MAKE_v1.4.1 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.0478 - Validation Loss: 0.0497 - Train Accuracy: 1.0 - Epoch: 25 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 3060, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Train Accuracy | Epoch | |:----------:|:---------------:|:--------------:|:-----:| | 1.4853 | 1.3110 | 0.8889 | 0 | | 1.2406 | 0.9999 | 0.8889 | 1 | | 0.9564 | 0.7717 | 0.8889 | 2 | | 0.7727 | 0.6130 | 0.8889 | 3 | | 0.5895 | 0.4969 | 0.8889 | 4 | | 0.4935 | 0.4101 | 0.8889 | 5 | | 0.3936 | 0.3439 | 0.8889 | 6 | | 0.3186 | 0.2861 | 0.8889 | 7 | | 0.2737 | 0.2477 | 0.8889 | 8 | | 0.2171 | 0.2156 | 1.0 | 9 | | 0.1996 | 0.1904 | 1.0 | 10 | | 0.1732 | 0.1709 | 1.0 | 11 | | 0.1494 | 0.1504 | 1.0 | 12 | | 0.1343 | 0.1323 | 1.0 | 13 | | 0.1065 | 0.1196 | 1.0 | 14 | | 0.1000 | 0.1083 | 1.0 | 15 | | 0.0961 | 0.0992 | 1.0 | 16 | | 0.0922 | 0.0904 | 1.0 | 17 | | 0.0796 | 0.0833 | 1.0 | 18 | | 0.0666 | 0.0764 | 1.0 | 19 | | 0.0640 | 0.0713 | 1.0 | 20 | | 0.0649 | 0.0675 | 1.0 | 21 | | 0.0612 | 0.0626 | 1.0 | 22 | | 0.0539 | 0.0579 | 1.0 | 23 | | 0.0512 | 0.0532 | 1.0 | 24 | | 0.0478 | 0.0497 | 1.0 | 25 | ### Framework versions - Transformers 4.28.1 - TensorFlow 2.12.0 - Datasets 2.11.0 - Tokenizers 0.13.3
AlexaRyck/KEITH
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="jcrOrganisation/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
Alexander-Learn/bert-finetuned-ner
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-sprint-meds results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-sprint-meds This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.8121 - Accuracy: 0.8843 - F1: 0.8655 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.4894 | 1.0 | 21 | 0.9107 | 0.8612 | 0.8354 | | 0.4471 | 2.0 | 42 | 0.8964 | 0.8630 | 0.8363 | | 0.4086 | 3.0 | 63 | 0.8796 | 0.8612 | 0.8348 | | 0.3651 | 4.0 | 84 | 0.8581 | 0.8665 | 0.8415 | | 0.3365 | 5.0 | 105 | 0.8546 | 0.8683 | 0.8429 | | 0.3241 | 6.0 | 126 | 0.8448 | 0.8701 | 0.8467 | | 0.299 | 7.0 | 147 | 0.8372 | 0.8683 | 0.8461 | | 0.2498 | 8.0 | 168 | 0.8340 | 0.8737 | 0.8500 | | 0.2579 | 9.0 | 189 | 0.8199 | 0.8737 | 0.8498 | | 0.2526 | 10.0 | 210 | 0.8191 | 0.8772 | 0.8549 | | 0.2243 | 11.0 | 231 | 0.8227 | 0.8719 | 0.8476 | | 0.1888 | 12.0 | 252 | 0.8254 | 0.8719 | 0.8489 | | 0.2159 | 13.0 | 273 | 0.8163 | 0.8772 | 0.8541 | | 0.1845 | 14.0 | 294 | 0.8117 | 0.8754 | 0.8533 | | 0.1774 | 15.0 | 315 | 0.8107 | 0.8772 | 0.8529 | | 0.1503 | 16.0 | 336 | 0.8109 | 0.8790 | 0.8589 | | 0.1565 | 17.0 | 357 | 0.8141 | 0.8772 | 0.8533 | | 0.1539 | 18.0 | 378 | 0.8174 | 0.8772 | 0.8556 | | 0.1393 | 19.0 | 399 | 0.8132 | 0.8790 | 0.8587 | | 0.1279 | 20.0 | 420 | 0.8171 | 0.8826 | 0.8602 | | 0.1231 | 21.0 | 441 | 0.8134 | 0.8808 | 0.8603 | | 0.119 | 22.0 | 462 | 0.8132 | 0.8843 | 0.8628 | | 0.1058 | 23.0 | 483 | 0.8043 | 0.8826 | 0.8631 | | 0.1106 | 24.0 | 504 | 0.8159 | 0.8808 | 0.8596 | | 0.1036 | 25.0 | 525 | 0.8090 | 0.8826 | 0.8612 | | 0.0895 | 26.0 | 546 | 0.8093 | 0.8879 | 0.8666 | | 0.1001 | 27.0 | 567 | 0.8121 | 0.8843 | 0.8636 | | 0.0956 | 28.0 | 588 | 0.8113 | 0.8808 | 0.8609 | | 0.0954 | 29.0 | 609 | 0.8099 | 0.8790 | 0.8581 | | 0.0856 | 30.0 | 630 | 0.8169 | 0.8826 | 0.8616 | | 0.0819 | 31.0 | 651 | 0.8204 | 0.8790 | 0.8590 | | 0.0888 | 32.0 | 672 | 0.8125 | 0.8826 | 0.8644 | | 0.0806 | 33.0 | 693 | 0.8144 | 0.8826 | 0.8628 | | 0.0836 | 34.0 | 714 | 0.8153 | 0.8790 | 0.8583 | | 0.0832 | 35.0 | 735 | 0.8139 | 0.8843 | 0.8644 | | 0.0719 | 36.0 | 756 | 0.8134 | 0.8826 | 0.8623 | | 0.0843 | 37.0 | 777 | 0.8141 | 0.8826 | 0.8637 | | 0.0768 | 38.0 | 798 | 0.8157 | 0.8826 | 0.8616 | | 0.0765 | 39.0 | 819 | 0.8183 | 0.8808 | 0.8621 | | 0.0685 | 40.0 | 840 | 0.8139 | 0.8808 | 0.8628 | | 0.0696 | 41.0 | 861 | 0.8149 | 0.8808 | 0.8631 | | 0.0747 | 42.0 | 882 | 0.8144 | 0.8843 | 0.8655 | | 0.0709 | 43.0 | 903 | 0.8136 | 0.8843 | 0.8655 | | 0.0666 | 44.0 | 924 | 0.8140 | 0.8843 | 0.8661 | | 0.071 | 45.0 | 945 | 0.8123 | 0.8808 | 0.8634 | | 0.0682 | 46.0 | 966 | 0.8137 | 
0.8843 | 0.8661 | | 0.0743 | 47.0 | 987 | 0.8119 | 0.8843 | 0.8661 | | 0.069 | 48.0 | 1008 | 0.8113 | 0.8843 | 0.8661 | | 0.0624 | 49.0 | 1029 | 0.8119 | 0.8843 | 0.8655 | | 0.0713 | 50.0 | 1050 | 0.8121 | 0.8843 | 0.8655 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
AlgoveraAI/dcgan
[ "pytorch", "transformers" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-CartPole-v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
Contrastive-Tension/BERT-Base-NLI-CT
[ "pytorch", "tf", "jax", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
2023-04-25T16:30:25Z
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: Taxi_rl results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.48 +/- 2.77 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python model = load_from_hub(repo_id="Adi0010/Taxi_rl", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
Culmenus/opus-mt-de-is-finetuned-de-to-is_nr2
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
2023-04-25T17:46:34Z
--- tags: - generated_from_trainer metrics: - accuracy - precision - recall model-index: - name: results results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # results This model was trained from scratch on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0956 - Accuracy: 0.9714 - Precision: 0.9704 - Recall: 0.9714 - F1 Score: 0.9708 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 250 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 Score | |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:--------:| | 0.2725 | 1.0 | 507 | 0.2315 | 0.9388 | 0.8813 | 0.9388 | 0.9091 | | 0.2883 | 2.0 | 1014 | 0.2167 | 0.9388 | 0.8813 | 0.9388 | 0.9091 | | 0.0762 | 3.0 | 1521 | 0.0956 | 0.9714 | 0.9704 | 0.9714 | 0.9708 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
CurtisBowser/DialoGPT-medium-sora-three
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - generated_from_trainer model-index: - name: distilroberta-tcfd-disclosure results: [] datasets: - rexarski/TCFD_disclosure language: - en --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilroberta-tcfd-disclosure This model is a fine-tuned version of [distilroberta-base](https://huggingface.co/distilroberta-base) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.8681 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 5 - total_train_batch_size: 80 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 50 - num_epochs: 20 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 5 | 2.3837 | | 2.3918 | 2.0 | 10 | 2.3787 | | 2.3918 | 3.0 | 15 | 2.3704 | | 2.3754 | 4.0 | 20 | 2.3623 | | 2.3754 | 5.0 | 25 | 2.3396 | | 2.2976 | 6.0 | 30 | 2.2599 | | 2.2976 | 7.0 | 35 | 2.1095 | | 2.0439 | 8.0 | 40 | 2.0184 | | 2.0439 | 9.0 | 45 | 1.9059 | | 1.6799 | 10.0 | 50 | 1.8469 | | 1.6799 | 11.0 | 55 | 1.8089 | | 1.2948 | 12.0 | 60 | 1.7263 | | 1.2948 | 13.0 | 65 | 1.7250 | | 0.9621 | 14.0 | 70 | 1.8106 | | 0.9621 | 15.0 | 75 | 1.8073 | | 0.7356 | 16.0 | 80 | 1.8681 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
CurtisBowser/DialoGPT-small-sora
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2023-04-25T17:50:59Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 229.34 +/- 41.24 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
CyberMuffin/DialoGPT-small-ChandlerBot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- license: apache-2.0 tags: - setfit - sentence-transformers - text-classification pipeline_tag: text-classification --- # andyP/sf-it-xxl-submission_20230425_175048 This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Usage To use this model for inference, first install the SetFit library: ```bash python -m pip install setfit ``` You can then run inference as follows: ```python from setfit import SetFitModel # Download from Hub and run inference model = SetFitModel.from_pretrained("andyP/sf-it-xxl-submission_20230425_175048") # Run inference preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"]) ``` ## BibTeX entry and citation info ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ```
Cyrell/Cyrell
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-04-25T17:52:24Z
--- license: apache-2.0 tags: - classification - generated_from_trainer datasets: - clinc_oos model-index: - name: clasificador-clinc_oos_dataset results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # clasificador-clinc_oos_dataset This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the clinc_oos dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
Czapla/Rick
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: output results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # output This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 30 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 1.18.3 - Tokenizers 0.13.3
D3xter1922/electra-base-discriminator-finetuned-mnli
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-04-25T18:02:02Z
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-cartpole_v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 464.27 +/- 77.15 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
DARKVIP3R/DialoGPT-medium-Anakin
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
2023-04-25T18:02:49Z
--- datasets: - tatsu-lab/alpaca language: - en ---
DCU-NLP/electra-base-irish-cased-discriminator-v1
[ "pytorch", "electra", "pretraining", "ga", "transformers", "irish", "license:apache-2.0" ]
null
{ "architectures": [ "ElectraForPreTraining" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2023-04-25T18:08:08Z
--- license: bigscience-openrail-m datasets: - generativeaidemo/generadai-sample library_name: adapter-transformers pipeline_tag: text-generation ---
DHBaek/xlm-roberta-large-korquad-mask
[ "pytorch", "xlm-roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "XLMRobertaForQuestionAnswering" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- library_name: ml-agents tags: - Pyramids - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Pyramids --- # **ppo** Agent playing **Pyramids** This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids 2. Step 1: Find your model_id: Dsfajardob/ppo-PyramidsRND 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
DJStomp/TestingSalvoNET
[ "transformers" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- license: mit datasets: - mhhmm/leetcode-solutions-python - deepmind/code_contests language: - en library_name: transformers pipeline_tag: text-generation widget: - text: " # Given an array of integers, return indices of the two numbers such that they add up to a specific target def twoSum(array, target) -> List[int]: " example_title: "Twosum problem" --- LLM: [Salesforce/CodeGen-6B-Mono](https://huggingface.co/Salesforce/codegen-6B-mono) I'm using [Peft](https://github.com/huggingface/peft) for tuning Tuning: - [LoRA](https://github.com/microsoft/LoRA) - [Leetcode](https://huggingface.co/datasets/mhhmm/leetcode-solutions-python) - [Google Deepmind Code contests](https://huggingface.co/datasets/deepmind/code_contests) - Google Colab Pro+ in ~2 hours, shoutout to my friend TieuPhuong
DKpro000/DialoGPT-medium-harrypotter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-04-25T18:21:15Z
--- library_name: keras license: openrail language: - es - en - fr metrics: - f1 pipeline_tag: text-classification --- # Model Card for MiniAM2 <!-- Provide a quick summary of what the model is/does. --> Case sensitive, multilingual, multiattribute model to predict the gender of Twitter users as well as their Organization status from text only input data. ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> MiniAM2 is an assemblage model of an enriched distillation with weak-supervised learning. It is a multilingual model to detect the gender and organizational status of Twitter users based on their name, screen_name, and bio description, that is lighter and outperforms the state-of-the-art [M3](https://github.com/euagendas/m3inference) model. The model is obtained after a novel semi-supervised process that we call "assemblage". This process is a multi-language strategy based on enriching the distillation process and weak-supervised learning with a low number of annotated data. This model can adapt to a language similar to an existing one without annotated data in the target language. We provide our model so social scientists can use it for their analysis. To know more about this process, we will put the link to the publication once we have it. The M3 model is named for its multilingual, multi-modal and multi-attributes characteristics. In our case, we discarded the multi-modal part, focusing on text inputs only. When comparing results we also consider a so-called M2 model: the M3 model inferences without the computer vision. As our model is smaller thanks to the enriched distillation process, we call the final model miniAM2 as a lighter version of a multilingual and multi-attribute model trained with an _assemblage_ process. MiniAM2 provides two key informations: the probability for an observation to be an organization or a human and the probability to be a male or a female. - **Developed by:** Arnault Gombert, Borja Sánchez-López, Jesus Cerquides - **Shared by:** Citibeats - **Model type:** Text-classification - **Language(s) (NLP):** EN, ES, FR - **License:** This software is © 2023 The Social Coin, SL and is licensed under the OPEN RAIL M License. See [license](https://www.citibeats.com/open-rail-m-license-for-citibeats) ### Model Sources <!-- Provide the basic links for the model. --> - **Repository:** Coming soon... - **Paper:** Coming soon... - **Demo:** Coming soon... ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> Social media platforms offer an invaluable wealth of data to understand what is taking place in our society. However, social media data hides demographic biases related to characteristics such as gender or age. Therefore, considering social media data as representative of the population can lead to fallacious interpretations. For instance, in France in 2021, women represent 51.6% of the population, whereas on Twitter they represent only 33.5% of French users. With such a significant difference between social network user demographics and the actual population, detecting the gender or age before delving into a deeper analysis of social phenomena becomes a priority. ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> Bias may appear on every data inferation provided by models. 
By knowing the empirical distributions present in the data, it is possible to leverage the predictions and reduce bias. We intend to use this model to capture gender distributions in Twitter data in order to know the potential bias in it and apply afterwards other techniques to minimize bias as much as possible. ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> This model is not created to identify the gender or organization status of individuals, and that use is not allowed. We do not allow neither using this model for the purpose of exploiting, harming or attempting to exploit or harm minors in any way. This model has license OpenRAIL and before using the model, the user should read and agree to our white paper and [license conditions](https://www.citibeats.com/open-rail-m-license-for-citibeats). ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> Since MiniAM2 is a deep learning model, it does contain bias. Although we studied the performance of MiniAM2 extensively, we did not worked around bias control. For example, the model may tend to predict _Man_ label if the text inputs are too short or if they lack of gender information, just because there is a majority of men as Twitter users. ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ```python from huggingface_hub import from_pretrained_keras miniam2 = from_pretrained_keras("CitibeatsAI/miniam2") ``` Obtain a prediction by just sending a list with the columns _screen_name_, _name_ and _bio_ description of a dataframe to the model ```python predictions = miniam2([df["screen_name"], df["name"], df["bio"]]) ``` The output will be an array of _n_ rows (as much as users sent to the model) and of 3 columns. The i-th row is the prediction of i-th user. First column is the probability to belong to an organization, second column is the probability of man class and third column is the probability of woman class. ## Training Details ### Training Data <!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> | Language | Training set | |:-------------|:---------------| | English | 3316545 | | Spanish | 3608997 | | French | 1086762 | ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing 1. Eliminate with a preprocessing step all punctuations, accents and styles on the text (do not lowercase, the model is case sensitive). We provide an example of preprocessing function named _tokenizer_preprocess_multiple_. It requires a dictionary of _patterns_ and corresponding _substitutions_ with identifiers. 
```python def tokenizer_preprocess_multiple(text): text = re.sub(r'[!\"#$%&\'()*+,-./:;<=>?@\[\\\]^_`{|}~]', ' ', text) return multiple_replace(text, patterns, substitutions) def search_code(match, patterns, subs): for pat, code in patterns.items(): if match in pat: return subs[code] return "No match" def multiple_replace(text, patterns, substitutions): # Create a regular expression from the dictionary keys # regex = re.compile("(%r)" % "|".join(map(re.escape, d.keys()))) pat = '|'.join(patterns.keys()) # For each match, look-up corresponding value in dictionary return re.sub(pat, lambda mo: search_code(mo.string[mo.start():mo.end()], patterns, substitutions), text) ### Example of patterns and substitutions patterns = { "[0123456789]":"0", u'[aàáâãäåăąǎǟǡǻȁȃȧ𝐚𝑎𝒂𝓪𝔞𝕒𝖆𝖺𝗮𝘢𝙖𝚊𝝰𝞪]': "1", "!": "2", } substitutions = { "0": "numbers", "1": "a", "2": ", prove me wrong", } text = "The letter 'ã' is 𝕒s useful 𝓪s 5!" print(multiple_replace(text, patterns, substitutions)) ``` ``` The letter 'a' is as useful as numbers, prove me wrong ``` #### Training Hyperparameters - **Training regime:** fp32 <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> The following hyperparameters were used during training: | Hyperparameters | Value | | :-- | :-- | | name | RMSprop | | weight_decay | None | | clipnorm | None | | global_clipnorm | None | | clipvalue | None | | use_ema | False | | ema_momentum | 0.99 | | ema_overwrite_frequency | 100 | | jit_compile | False | | is_legacy_optimizer | False | | learning_rate | 0.0010000000474974513 | | rho | 0.9 | | momentum | 0.0 | | epsilon | 1e-07 | | centered | False | | training_precision | float32 | #### Speeds, Sizes, Times <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> - **Infer speed:** ~2582 users per second - **Model size:** 2.7 M parameters ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Data Card if possible. --> Hard labeled (humanly annotated data) | Language | Test set | |:-------------|:------------| | English | 2568 | | Spanish | 2498 | | French | 2136 | <!--#### Factors These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> We compared our model against [M3](https://github.com/euagendas/m3inference) and against M2, which is the M3 version that accepts only text as input (exactly the same inputs as for MiniAM2) in Accuracy, Recall, Precision, Loss and F1. 
By using all these metrics, we provide a wider and deeper comparison between models ### Results | Model | Accuracy | Recall | Precision | Loss | F1 | |:------|:---------|:-------|:----------|:-----|:-----| | English| | M2 | 80.92 | 84.55 | 73.75 | 0.49 | 76.81| | M3 | 86.4 | 85.04 | 83.56 | 0.39 | 84.2 | |MiniAM2| 83.39 | 85.07 | 80.05 | 0.5 | 82.14| | Spanish| |M2 | 77.29 | 81.91 | 68.78 | 0.55 | 69.94| |M3 | 88.44 | 86.18 | 86.55 | 0.39 | 86.3 | |MiniAM2| 86.52 | 82.55 | 85.87 | 0.37 | 84.04| |French | | M2 | 68.45 | 74.19 | 63.79 | 0.77 | 63.64| |M3 | 83.17 | 82.46 | 80.63 | 0.47 | 81.42| |MiniAM2| 81.86 | 78.71 | 80.07 | 0.5 | 79.33| Summarizing a lot, MiniAM2 outperforms M2 and even compares with the quality of M3 (which is additionally processing images to make its predictions) #### Summary Multilingual gender and organization status detector model MiniAM2 is lighter, faster and more accurate than M2. According to the experiments, MiniAM2 closely follows the quality of M3, implying that our model which processes only text competes with models that benefit from image input data. In the future, we plan to add more languages given the cheap and fast process developed in our work. ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** Intel Xeon Gold 6148 (TDP of 150W) - **Hours used:** 30 - **Cloud Provider:** Amazon Web Services - **Compute Region:** eu-north-1 - **Carbon Emitted:** 0.05 kgCO2eq/kWh ### Model Architecture and Objective To process the text, MiniAM2 applies two types of tokenization: character-level and word-level tokenizations (except for _screenname_ that uses character tokenization only). The tokenized inputs are fed to embedding layers before two layers of feed-forward neural networks, referred as the deep-learning component (DL) from now on. For every combination of inputs and tokenization, the model has a deep learning (DL) component, referred as DL1,...,DL5. Then MiniAM2 concatenates the 5 deep-learning representations together before a final softmax layer with 3 outputs: _organization_, _man_ and _woman_. <!-- ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] --> ## Citation <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> Coming soon... **BibTeX:** Coming soon... **APA:** Coming soon... <!-- ## Glossary [optional] If relevant, include terms and calculations in this section that can help readers understand the model or model card. [More Information Needed] --> ## Model Card Contact - [email protected]
DTAI-KULeuven/robbertje-1-gb-non-shuffled
[ "pytorch", "roberta", "fill-mask", "nl", "dataset:oscar", "dataset:dbrd", "dataset:lassy-ud", "dataset:europarl-mono", "dataset:conll2002", "arxiv:2101.05716", "transformers", "Dutch", "Flemish", "RoBERTa", "RobBERT", "RobBERTje", "license:mit", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
53
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-CartPole-v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
alexandrainst/da-emotion-classification-base
[ "pytorch", "tf", "bert", "text-classification", "da", "transformers", "license:cc-by-sa-4.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
837
2023-04-25T18:41:33Z
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 760.46 +/- 75.69 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of a **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
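The usage section of the card above is left as a TODO; the following is a hedged sketch of how such a stable-baselines3 checkpoint is typically downloaded and loaded with `huggingface_sb3`. The repository id and filename are placeholders, not the actual artifacts of this model.

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

# Placeholder repo id / filename -- replace with this model's actual repository and zip file.
checkpoint = load_from_hub(
    repo_id="<user>/a2c-AntBulletEnv-v0",
    filename="a2c-AntBulletEnv-v0.zip",
)
model = A2C.load(checkpoint)  # returns a ready-to-use A2C policy
```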
alexandrainst/da-hatespeech-classification-base
[ "pytorch", "tf", "safetensors", "bert", "text-classification", "da", "transformers", "license:cc-by-sa-4.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
866
null
--- tags: - autotrain - text-generation widget: - text: "I love 🤗 AutoTrain because " datasets: - abhishek/autotrain-data-llama-alpaca-peft co2_eq_emissions: emissions: 0 --- # Model Trained Using AutoTrain - Problem type: Text Generation - CO2 Emissions (in grams): 0.0000 ## Validation Metrics loss: 0.8808356523513794
alexandrainst/da-ner-base
[ "pytorch", "tf", "bert", "token-classification", "da", "dataset:dane", "transformers", "license:cc-by-sa-4.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
78
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: t5-small-finetuned-esco-summarisation results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-small-finetuned-esco-summarisation This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the None dataset. It achieves the following results on the evaluation set: - epoch: 2.0 - eval_accuracy: 0.0694 - eval_loss: 1.8363 - eval_runtime: 209.841 - eval_samples_per_second: 10.436 - eval_steps_per_second: 2.612 - step: 7614 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.25.1 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.2
alexandrainst/da-hatespeech-detection-small
[ "pytorch", "electra", "text-classification", "da", "transformers", "license:cc-by-4.0" ]
text-classification
{ "architectures": [ "ElectraForSequenceClassification" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,506
2023-04-25T18:51:30Z
--- tags: - generated_from_trainer metrics: - wer model-index: - name: pratyush_whisper_small_distil_libri360_enc_8_dec_6_batch_2_epoch_50 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pratyush_whisper_small_distil_libri360_enc_8_dec_6_batch_2_epoch_50 This model is a fine-tuned version of [](https://huggingface.co/) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.9043 - Wer: 13.0427 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 2 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 512 - total_train_batch_size: 1024 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.2 - num_epochs: 50 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 6.8528 | 0.49 | 100 | 5.4707 | 94.5857 | | 5.4115 | 0.98 | 200 | 4.7525 | 89.6667 | | 4.44 | 1.48 | 300 | 2.5672 | 47.3154 | | 2.71 | 1.97 | 400 | 2.0272 | 26.9788 | | 2.2003 | 2.46 | 500 | 1.8737 | 20.2713 | | 2.0566 | 2.95 | 600 | 1.8204 | 17.6620 | | 1.9829 | 3.45 | 700 | 1.7948 | 16.2944 | | 1.9501 | 3.94 | 800 | 1.7809 | 15.3891 | | 1.9173 | 4.43 | 900 | 1.7755 | 15.0537 | | 1.9025 | 4.93 | 1000 | 1.7754 | 14.7302 | | 1.8847 | 5.42 | 1100 | 1.7820 | 14.6116 | | 1.8776 | 5.91 | 1200 | 1.7795 | 14.1585 | | 1.8661 | 6.4 | 1300 | 1.7807 | 13.9664 | | 1.8647 | 6.9 | 1400 | 1.7841 | 13.9940 | | 1.858 | 7.39 | 1500 | 1.7921 | 13.9489 | | 1.8608 | 7.88 | 1600 | 1.7997 | 13.9269 | | 1.858 | 8.37 | 1700 | 1.8084 | 13.9370 | | 1.8621 | 8.87 | 1800 | 1.8160 | 13.8414 | | 1.8633 | 9.36 | 1900 | 1.8221 | 13.9627 | | 1.8663 | 9.85 | 2000 | 1.8259 | 14.0013 | | 1.8667 | 10.34 | 2100 | 1.8429 | 13.9379 | | 1.865 | 10.84 | 2200 | 1.8406 | 13.9011 | | 1.8614 | 11.33 | 2300 | 1.8401 | 13.5887 | | 1.8564 | 11.82 | 2400 | 1.8587 | 13.5739 | | 1.8552 | 12.32 | 2500 | 1.8514 | 13.5620 | | 1.8523 | 12.81 | 2600 | 1.8561 | 13.3295 | | 1.8551 | 13.3 | 2700 | 1.8581 | 13.3148 | | 1.8521 | 13.79 | 2800 | 1.8650 | 13.1594 | | 1.8522 | 14.29 | 2900 | 1.8729 | 13.2385 | | 1.8513 | 14.78 | 3000 | 1.8754 | 13.1778 | | 1.8524 | 15.27 | 3100 | 1.8814 | 13.0611 | | 1.8495 | 15.76 | 3200 | 1.8867 | 13.2504 | | 1.8553 | 16.26 | 3300 | 1.8860 | 13.0942 | | 1.8531 | 16.75 | 3400 | 1.8884 | 12.8175 | | 1.8545 | 17.24 | 3500 | 1.9003 | 12.8598 | | 1.8533 | 17.73 | 3600 | 1.8982 | 13.0381 | | 1.8548 | 18.23 | 3700 | 1.9005 | 13.0299 | | 1.8542 | 18.72 | 3800 | 1.9067 | 13.0289 | | 1.8552 | 19.21 | 3900 | 1.9043 | 13.0427 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.12.1 - Datasets 2.7.0 - Tokenizers 0.11.0
DaisyMak/bert-finetuned-squad-transformerfrozen-testtoken
[ "pytorch", "tensorboard", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2023-04-26T03:41:25Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imagefolder metrics: - accuracy model-index: - name: plant-seedlings-model-ConvNet-all-train results: - task: name: Image Classification type: image-classification dataset: name: imagefolder type: imagefolder config: default split: train args: default metrics: - name: Accuracy type: accuracy value: 0.9392265193370166 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # plant-seedlings-model-ConvNet-all-train This model is a fine-tuned version of [facebook/convnext-tiny-224](https://huggingface.co/facebook/convnext-tiny-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.2653 - Accuracy: 0.9392 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.2307 | 0.25 | 100 | 0.4912 | 0.8729 | | 0.0652 | 0.49 | 200 | 0.3280 | 0.9085 | | 0.1854 | 0.74 | 300 | 0.4850 | 0.8711 | | 0.1831 | 0.98 | 400 | 0.3827 | 0.8938 | | 0.1636 | 1.23 | 500 | 0.4071 | 0.9012 | | 0.0868 | 1.47 | 600 | 0.3980 | 0.8999 | | 0.2298 | 1.72 | 700 | 0.4855 | 0.8846 | | 0.2291 | 1.97 | 800 | 0.4019 | 0.8883 | | 0.2698 | 2.21 | 900 | 0.3855 | 0.8944 | | 0.0923 | 2.46 | 1000 | 0.3690 | 0.8938 | | 0.1396 | 2.7 | 1100 | 0.4715 | 0.8760 | | 0.174 | 2.95 | 1200 | 0.3710 | 0.9006 | | 0.1009 | 3.19 | 1300 | 0.3481 | 0.9030 | | 0.1162 | 3.44 | 1400 | 0.3502 | 0.9153 | | 0.1737 | 3.69 | 1500 | 0.4034 | 0.8999 | | 0.2478 | 3.93 | 1600 | 0.4053 | 0.8913 | | 0.1471 | 4.18 | 1700 | 0.3555 | 0.9036 | | 0.1873 | 4.42 | 1800 | 0.3769 | 0.9122 | | 0.0615 | 4.67 | 1900 | 0.4147 | 0.8987 | | 0.1718 | 4.91 | 2000 | 0.2779 | 0.9214 | | 0.1012 | 5.16 | 2100 | 0.3239 | 0.9159 | | 0.0967 | 5.41 | 2200 | 0.3290 | 0.9079 | | 0.0873 | 5.65 | 2300 | 0.4057 | 0.9055 | | 0.0567 | 5.9 | 2400 | 0.3821 | 0.9018 | | 0.1356 | 6.14 | 2500 | 0.4183 | 0.8944 | | 0.168 | 6.39 | 2600 | 0.3755 | 0.9067 | | 0.1592 | 6.63 | 2700 | 0.3413 | 0.9079 | | 0.1239 | 6.88 | 2800 | 0.3299 | 0.9091 | | 0.0382 | 7.13 | 2900 | 0.3391 | 0.9165 | | 0.1167 | 7.37 | 3000 | 0.4274 | 0.8987 | | 0.109 | 7.62 | 3100 | 0.3952 | 0.9018 | | 0.0591 | 7.86 | 3200 | 0.4043 | 0.9122 | | 0.1407 | 8.11 | 3300 | 0.3325 | 0.9134 | | 0.054 | 8.35 | 3400 | 0.3333 | 0.9177 | | 0.0633 | 8.6 | 3500 | 0.3275 | 0.9208 | | 0.1038 | 8.85 | 3600 | 0.3982 | 0.9042 | | 0.0435 | 9.09 | 3700 | 0.3656 | 0.9190 | | 0.1549 | 9.34 | 3800 | 0.3367 | 0.9190 | | 0.2299 | 9.58 | 3900 | 0.3872 | 0.9134 | | 0.0375 | 9.83 | 4000 | 0.3206 | 0.9245 | | 0.0204 | 10.07 | 4100 | 0.3133 | 0.9263 | | 0.1208 | 10.32 | 4200 | 0.3373 | 0.9196 | | 0.0617 | 10.57 | 4300 | 0.3045 | 0.9220 | | 0.1426 | 10.81 | 4400 | 0.2972 | 0.9294 | | 0.0351 | 11.06 | 4500 | 0.3409 | 0.9147 | | 0.0311 | 11.3 | 4600 | 0.3003 | 0.9233 | | 0.1255 | 11.55 | 4700 | 0.3447 | 0.9282 | | 0.0569 | 11.79 | 4800 | 0.2703 | 0.9331 | | 0.0918 | 
12.04 | 4900 | 0.3170 | 0.9245 | | 0.0656 | 12.29 | 5000 | 0.3223 | 0.9190 | | 0.0971 | 12.53 | 5100 | 0.3209 | 0.9196 | | 0.0742 | 12.78 | 5200 | 0.3030 | 0.9282 | | 0.0662 | 13.02 | 5300 | 0.2780 | 0.9319 | | 0.0453 | 13.27 | 5400 | 0.3360 | 0.9227 | | 0.0869 | 13.51 | 5500 | 0.2417 | 0.9343 | | 0.1786 | 13.76 | 5600 | 0.3078 | 0.9263 | | 0.1563 | 14.0 | 5700 | 0.3046 | 0.9312 | | 0.0584 | 14.25 | 5800 | 0.3011 | 0.9288 | | 0.0783 | 14.5 | 5900 | 0.2705 | 0.9288 | | 0.0486 | 14.74 | 6000 | 0.2583 | 0.9288 | | 0.094 | 14.99 | 6100 | 0.2854 | 0.9282 | | 0.0852 | 15.23 | 6200 | 0.2693 | 0.9325 | | 0.0665 | 15.48 | 6300 | 0.2754 | 0.9282 | | 0.0948 | 15.72 | 6400 | 0.2598 | 0.9349 | | 0.0368 | 15.97 | 6500 | 0.2875 | 0.9355 | | 0.0031 | 16.22 | 6600 | 0.2679 | 0.9325 | | 0.0796 | 16.46 | 6700 | 0.2642 | 0.9300 | | 0.0903 | 16.71 | 6800 | 0.2977 | 0.9269 | | 0.0952 | 16.95 | 6900 | 0.2615 | 0.9337 | | 0.1344 | 17.2 | 7000 | 0.2948 | 0.9251 | | 0.0854 | 17.44 | 7100 | 0.2748 | 0.9368 | | 0.0891 | 17.69 | 7200 | 0.2386 | 0.9325 | | 0.1202 | 17.94 | 7300 | 0.2509 | 0.9355 | | 0.0832 | 18.18 | 7400 | 0.2406 | 0.9398 | | 0.0949 | 18.43 | 7500 | 0.2356 | 0.9386 | | 0.0404 | 18.67 | 7600 | 0.2415 | 0.9386 | | 0.1008 | 18.92 | 7700 | 0.2582 | 0.9355 | | 0.092 | 19.16 | 7800 | 0.2724 | 0.9325 | | 0.0993 | 19.41 | 7900 | 0.2655 | 0.9325 | | 0.0593 | 19.66 | 8000 | 0.2423 | 0.9386 | | 0.1011 | 19.9 | 8100 | 0.2653 | 0.9392 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
Daivakai/DialoGPT-small-saitama
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
Access to model bigcode/starcoderbase-megatron is restricted and you are not in the authorized list. Visit https://huggingface.co/bigcode/starcoderbase-megatron to ask for access.
Daltcamalea01/Camaleaodalt
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-04-25T19:05:41Z
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 5610 with parameters: ``` {'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `__main__.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 2, "evaluation_steps": 1000, "evaluator": "sentence_transformers.evaluation.SequentialEvaluator.SequentialEvaluator", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "eps": 1e-06, "lr": 0.0001 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 1000, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) (2): Dense({'in_features': 384, 'out_features': 768, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
DanL/scientific-challenges-and-directions
[ "pytorch", "bert", "text-classification", "en", "dataset:DanL/scientific-challenges-and-directions-dataset", "arxiv:2108.13751", "transformers", "generated_from_trainer" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
134
2023-04-25T19:10:43Z
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: finetuning-distilbert-hate-speech-score-model-all-samples-250423 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning-distilbert-hate-speech-score-model-all-samples-250423 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.2979 - Mse: 0.2979 - Rmse: 0.5458 - Mae: 0.2755 - R2: 0.9475 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
Danbi/distilroberta-base-finetuned-wikitext2
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-04-25T19:11:50Z
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: finetuned-Sentiment-classfication-DISTILBERT-model results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuned-Sentiment-classfication-DISTILBERT-model This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.5644 - Rmse: 0.6239 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - gradient_accumulation_steps: 16 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 7 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rmse | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.6862 | 4.0 | 500 | 0.5644 | 0.6239 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
Dandara/bertimbau-socioambiental
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imagefolder metrics: - accuracy model-index: - name: plant-seedlings-model-resnet-152-2 results: - task: name: Image Classification type: image-classification dataset: name: imagefolder type: imagefolder config: default split: train args: default metrics: - name: Accuracy type: accuracy value: 0.9381139489194499 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # plant-seedlings-model-resnet-152-2 This model is a fine-tuned version of [microsoft/resnet-152](https://huggingface.co/microsoft/resnet-152) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.2242 - Accuracy: 0.9381 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 2.0105 | 0.2 | 100 | 1.8953 | 0.4062 | | 0.995 | 0.39 | 200 | 1.0372 | 0.6685 | | 0.9354 | 0.59 | 300 | 0.7713 | 0.7461 | | 0.6444 | 0.79 | 400 | 0.6037 | 0.8026 | | 0.6477 | 0.98 | 500 | 0.5981 | 0.7991 | | 0.6551 | 1.18 | 600 | 0.5224 | 0.8310 | | 0.6466 | 1.38 | 700 | 0.5216 | 0.8222 | | 0.4006 | 1.57 | 800 | 0.4244 | 0.8541 | | 0.4484 | 1.77 | 900 | 0.4513 | 0.8566 | | 0.5155 | 1.96 | 1000 | 0.4071 | 0.8649 | | 0.518 | 2.16 | 1100 | 0.4155 | 0.8679 | | 0.3762 | 2.36 | 1200 | 0.4152 | 0.8733 | | 0.5409 | 2.55 | 1300 | 0.4038 | 0.8728 | | 0.3184 | 2.75 | 1400 | 0.3683 | 0.8777 | | 0.3861 | 2.95 | 1500 | 0.3675 | 0.8811 | | 0.4824 | 3.14 | 1600 | 0.4404 | 0.8595 | | 0.2793 | 3.34 | 1700 | 0.3696 | 0.8816 | | 0.4095 | 3.54 | 1800 | 0.3102 | 0.8939 | | 0.4151 | 3.73 | 1900 | 0.3558 | 0.8875 | | 0.4036 | 3.93 | 2000 | 0.3215 | 0.8998 | | 0.3547 | 4.13 | 2100 | 0.3511 | 0.8885 | | 0.3071 | 4.32 | 2200 | 0.3376 | 0.8885 | | 0.3448 | 4.52 | 2300 | 0.3807 | 0.8743 | | 0.3574 | 4.72 | 2400 | 0.2826 | 0.9106 | | 0.4435 | 4.91 | 2500 | 0.3275 | 0.9013 | | 0.2811 | 5.11 | 2600 | 0.3285 | 0.9003 | | 0.3514 | 5.3 | 2700 | 0.3562 | 0.8949 | | 0.2323 | 5.5 | 2800 | 0.3023 | 0.9037 | | 0.3736 | 5.7 | 2900 | 0.3012 | 0.8998 | | 0.2659 | 5.89 | 3000 | 0.3243 | 0.8964 | | 0.3934 | 6.09 | 3100 | 0.3007 | 0.9042 | | 0.1951 | 6.29 | 3200 | 0.2643 | 0.9204 | | 0.2882 | 6.48 | 3300 | 0.2816 | 0.9175 | | 0.1887 | 6.68 | 3400 | 0.2669 | 0.9165 | | 0.3612 | 6.88 | 3500 | 0.3215 | 0.8993 | | 0.1423 | 7.07 | 3600 | 0.2684 | 0.9170 | | 0.2935 | 7.27 | 3700 | 0.2826 | 0.9072 | | 0.1549 | 7.47 | 3800 | 0.2783 | 0.9072 | | 0.2678 | 7.66 | 3900 | 0.2535 | 0.9140 | | 0.1954 | 7.86 | 4000 | 0.2578 | 0.9136 | | 0.2319 | 8.06 | 4100 | 0.2595 | 0.9106 | | 0.2016 | 8.25 | 4200 | 0.2671 | 0.9160 | | 0.284 | 8.45 | 4300 | 0.2688 | 0.9136 | | 0.1635 | 8.64 | 4400 | 0.3101 | 0.9111 | | 0.2609 | 8.84 | 4500 | 0.2990 | 0.9145 | | 0.1826 | 9.04 | 4600 | 0.2630 | 0.9077 | | 0.2091 | 9.23 | 4700 | 0.2712 | 0.9180 | | 0.1217 | 9.43 | 4800 | 0.2550 | 0.9126 | | 0.198 | 9.63 | 4900 | 0.2648 | 0.9140 
| | 0.2123 | 9.82 | 5000 | 0.2819 | 0.9116 | | 0.1399 | 10.02 | 5100 | 0.2690 | 0.9165 | | 0.2429 | 10.22 | 5200 | 0.2685 | 0.9194 | | 0.1376 | 10.41 | 5300 | 0.2930 | 0.9091 | | 0.192 | 10.61 | 5400 | 0.3042 | 0.9101 | | 0.1872 | 10.81 | 5500 | 0.2693 | 0.9160 | | 0.1629 | 11.0 | 5600 | 0.2563 | 0.9185 | | 0.2487 | 11.2 | 5700 | 0.2476 | 0.9258 | | 0.242 | 11.39 | 5800 | 0.2407 | 0.9283 | | 0.166 | 11.59 | 5900 | 0.2382 | 0.9317 | | 0.1181 | 11.79 | 6000 | 0.2576 | 0.9140 | | 0.1407 | 11.98 | 6100 | 0.2520 | 0.9268 | | 0.1931 | 12.18 | 6200 | 0.2634 | 0.9204 | | 0.1064 | 12.38 | 6300 | 0.2655 | 0.9219 | | 0.1261 | 12.57 | 6400 | 0.2569 | 0.9209 | | 0.1978 | 12.77 | 6500 | 0.2801 | 0.9131 | | 0.2031 | 12.97 | 6600 | 0.2541 | 0.9190 | | 0.1245 | 13.16 | 6700 | 0.2331 | 0.9249 | | 0.2824 | 13.36 | 6800 | 0.2573 | 0.9199 | | 0.1302 | 13.56 | 6900 | 0.2452 | 0.9219 | | 0.0825 | 13.75 | 7000 | 0.2384 | 0.9258 | | 0.1491 | 13.95 | 7100 | 0.2373 | 0.9303 | | 0.1859 | 14.15 | 7200 | 0.2623 | 0.9253 | | 0.2094 | 14.34 | 7300 | 0.2308 | 0.9303 | | 0.14 | 14.54 | 7400 | 0.2377 | 0.9298 | | 0.1836 | 14.73 | 7500 | 0.2389 | 0.9268 | | 0.1347 | 14.93 | 7600 | 0.2205 | 0.9327 | | 0.0747 | 15.13 | 7700 | 0.2375 | 0.9288 | | 0.1448 | 15.32 | 7800 | 0.2277 | 0.9342 | | 0.0885 | 15.52 | 7900 | 0.2560 | 0.9219 | | 0.0975 | 15.72 | 8000 | 0.2082 | 0.9293 | | 0.1185 | 15.91 | 8100 | 0.2561 | 0.9214 | | 0.1544 | 16.11 | 8200 | 0.2599 | 0.9283 | | 0.0959 | 16.31 | 8300 | 0.2418 | 0.9263 | | 0.0835 | 16.5 | 8400 | 0.2521 | 0.9352 | | 0.0846 | 16.7 | 8500 | 0.2258 | 0.9347 | | 0.1255 | 16.9 | 8600 | 0.2170 | 0.9342 | | 0.1116 | 17.09 | 8700 | 0.2462 | 0.9288 | | 0.1331 | 17.29 | 8800 | 0.2123 | 0.9420 | | 0.0895 | 17.49 | 8900 | 0.2513 | 0.9293 | | 0.1628 | 17.68 | 9000 | 0.2223 | 0.9283 | | 0.2152 | 17.88 | 9100 | 0.2144 | 0.9396 | | 0.1074 | 18.07 | 9200 | 0.2295 | 0.9376 | | 0.1888 | 18.27 | 9300 | 0.2557 | 0.9337 | | 0.1014 | 18.47 | 9400 | 0.2007 | 0.9411 | | 0.0341 | 18.66 | 9500 | 0.2289 | 0.9371 | | 0.0365 | 18.86 | 9600 | 0.2434 | 0.9337 | | 0.1099 | 19.06 | 9700 | 0.2222 | 0.9337 | | 0.1303 | 19.25 | 9800 | 0.2208 | 0.9317 | | 0.1209 | 19.45 | 9900 | 0.2151 | 0.9401 | | 0.2119 | 19.65 | 10000 | 0.2209 | 0.9376 | | 0.0734 | 19.84 | 10100 | 0.2242 | 0.9381 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
Darein/Def
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-04-25T19:20:20Z
--- tags: - LunarLander-v2 - ppo - deep-reinforcement-learning - reinforcement-learning - custom-implementation - deep-rl-course model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: -161.30 +/- 89.63 name: mean_reward verified: false --- # PPO Agent Playing LunarLander-v2 This is a trained model of a PPO agent playing LunarLander-v2. # Hyperparameters
DarkWolf/kn-electra-small
[ "pytorch", "electra", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2023-04-25T19:22:48Z
--- license: apache-2.0 tags: - setfit - sentence-transformers - text-classification pipeline_tag: text-classification --- # andyP/sf-it-submission_20230425_191818 This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Usage To use this model for inference, first install the SetFit library: ```bash python -m pip install setfit ``` You can then run inference as follows: ```python from setfit import SetFitModel # Download from Hub and run inference model = SetFitModel.from_pretrained("andyP/sf-it-submission_20230425_191818") # Run inference preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"]) ``` ## BibTeX entry and citation info ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ```
Darkecho789/email-gen
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - Pixelcopter-PLE-v0 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-Pixelcopter-PLE-v0 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Pixelcopter-PLE-v0 type: Pixelcopter-PLE-v0 metrics: - type: mean_reward value: 14.80 +/- 12.35 name: mean_reward verified: false --- # **Reinforce** Agent playing **Pixelcopter-PLE-v0** This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
DarkestSky/distilbert-base-uncased-finetuned-ner
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-04-25T19:24:46Z
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-de results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme args: PAN-X.de metrics: - name: F1 type: f1 value: 0.8609120891618334 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-de This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.1400 - F1: 0.8609 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.2581 | 1.0 | 525 | 0.1584 | 0.8233 | | 0.1252 | 2.0 | 1050 | 0.1384 | 0.8491 | | 0.0811 | 3.0 | 1575 | 0.1400 | 0.8609 | ### Framework versions - Transformers 4.11.3 - Pytorch 2.0.0+cu118 - Datasets 1.16.1 - Tokenizers 0.10.3
Darya/layoutlmv2-finetuned-funsd-test
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 2794.24 +/- 97.96 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of a **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
Daryaflp/roberta-retrained_ru_covid
[ "pytorch", "tensorboard", "roberta", "fill-mask", "transformers", "generated_from_trainer", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2023-04-25T19:33:59Z
--- library_name: sample-factory tags: - deep-reinforcement-learning - reinforcement-learning - sample-factory model-index: - name: APPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: doom_health_gathering_supreme type: doom_health_gathering_supreme metrics: - type: mean_reward value: 13.18 +/- 6.83 name: mean_reward verified: false --- A(n) **APPO** model trained on the **doom_health_gathering_supreme** environment. This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory. Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/ ## Downloading the model After installing Sample-Factory, download the model with: ``` python -m sample_factory.huggingface.load_from_hub -r J3/rl_course_vizdoom_health_gathering_supreme ``` ## Using the model To run the model after download, use the `enjoy` script corresponding to this environment: ``` python -m .usr.local.lib.python3.9.dist-packages.ipykernel_launcher --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme ``` You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag. See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details ## Training with this model To continue training with this model, use the `train` script corresponding to this environment: ``` python -m .usr.local.lib.python3.9.dist-packages.ipykernel_launcher --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000 ``` Note, you may have to adjust `--train_for_env_steps` to a suitably high number as the experiment will resume at the number of steps it concluded at.
DataikuNLP/TinyBERT_General_4L_312D
[ "pytorch", "jax", "bert", "arxiv:1909.10351", "transformers" ]
null
{ "architectures": null, "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
74
2023-04-25T19:34:24Z
--- library_name: stable-baselines3 tags: - PandaReachDense-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: PandaReachDense-v2 type: PandaReachDense-v2 metrics: - type: mean_reward value: -1.89 +/- 0.51 name: mean_reward verified: false --- # **A2C** Agent playing **PandaReachDense-v2** This is a trained model of a **A2C** agent playing **PandaReachDense-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
DataikuNLP/camembert-base
[ "pytorch", "tf", "camembert", "fill-mask", "fr", "dataset:oscar", "arxiv:1911.03894", "transformers", "license:mit", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "CamembertForMaskedLM" ], "model_type": "camembert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
Access to model Markuus/bert-base-uncased-squad-v2 is restricted and you are not in the authorized list. Visit https://huggingface.co/Markuus/bert-base-uncased-squad-v2 to ask for access.
DataikuNLP/paraphrase-multilingual-MiniLM-L12-v2
[ "pytorch", "bert", "arxiv:1908.10084", "sentence-transformers", "feature-extraction", "sentence-similarity", "transformers", "license:apache-2.0" ]
sentence-similarity
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,517
null
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/473191190477537280/bWjYb1Rb_400x400.jpeg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Yannic Kilcher, Tech Sister</div> <div style="text-align: center; font-size: 14px;">@ykilcher</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Yannic Kilcher, Tech Sister. | Data | Yannic Kilcher, Tech Sister | | --- | --- | | Tweets downloaded | 3247 | | Retweets | 729 | | Short tweets | 243 | | Tweets kept | 2275 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/ns85lkrx/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @ykilcher's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/7qkiebya) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/7qkiebya/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/ykilcher') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Dave/twomad-model
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 1124.88 +/- 103.32 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of a **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
DavidAMcIntosh/DialoGPT-small-rick
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-04-25T19:46:00Z
--- license: mit tags: - generated_from_keras_callback model-index: - name: Harshkmr/codeparrot-ds results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # Harshkmr/codeparrot-ds This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 5.0054 - Validation Loss: 4.0516 - Epoch: 0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'WarmUp', 'config': {'initial_learning_rate': 5e-05, 'decay_schedule_fn': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 5e-05, 'decay_steps': 3423, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, '__passive_serialization__': True}, 'warmup_steps': 1000, 'power': 1.0, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: mixed_float16 ### Training results | Train Loss | Validation Loss | Epoch | |:----------:|:---------------:|:-----:| | 5.0054 | 4.0516 | 0 | ### Framework versions - Transformers 4.28.1 - TensorFlow 2.12.0 - Datasets 2.11.0 - Tokenizers 0.13.3
DavidAMcIntosh/small-rick
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-04-25T19:49:07Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 268.83 +/- 22.52 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
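As with the other stable-baselines3 card, the usage code above is a TODO; a hedged sketch of loading and evaluating such a PPO checkpoint follows. The repository id and filename are placeholders, not the actual artifacts of this model.

```python
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# Placeholder repo id / filename -- replace with this model's actual repository and zip file.
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

# Evaluate the loaded policy on the environment it was trained for.
env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```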
Davlan/bert-base-multilingual-cased-finetuned-amharic
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
109
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - mit_restaurants metrics: - precision - recall - f1 - accuracy model-index: - name: distilbert-carpentries-restaurant-ner results: - task: name: Token Classification type: token-classification dataset: name: mit_restaurants type: mit_restaurants config: mit_restaurants split: validation args: mit_restaurants metrics: - name: Precision type: precision value: 0.7777264325323475 - name: Recall type: recall value: 0.8091346153846154 - name: F1 type: f1 value: 0.793119698397738 - name: Accuracy type: accuracy value: 0.908908908908909 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-carpentries-restaurant-ner This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the mit_restaurants dataset. It achieves the following results on the evaluation set: - Loss: 0.3068 - Precision: 0.7777 - Recall: 0.8091 - F1: 0.7931 - Accuracy: 0.9089 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 479 | 0.3555 | 0.7344 | 0.7764 | 0.7548 | 0.8934 | | 0.6242 | 2.0 | 958 | 0.3149 | 0.7730 | 0.8106 | 0.7914 | 0.9063 | | 0.297 | 3.0 | 1437 | 0.3068 | 0.7777 | 0.8091 | 0.7931 | 0.9089 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
Davlan/bert-base-multilingual-cased-finetuned-hausa
[ "pytorch", "tf", "jax", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
151
2023-04-25T19:57:44Z
Model trained on data from the Kaggle Toxic Comment Classification Challenge. A GPU in Google Colab was used for training.
Davlan/bert-base-multilingual-cased-finetuned-igbo
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
15
2023-04-25T19:57:53Z
--- license: mit tags: - generated_from_keras_callback model-index: - name: juro95/xlm-roberta-finetuned-ner-full_0.3_no_comp_or_nace_second results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # juro95/xlm-roberta-finetuned-ner-full_0.3_no_comp_or_nace_second This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.0146 - Validation Loss: 0.0260 - Epoch: 3 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 96108, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: mixed_float16 ### Training results | Train Loss | Validation Loss | Epoch | |:----------:|:---------------:|:-----:| | 0.1086 | 0.0537 | 0 | | 0.0406 | 0.0306 | 1 | | 0.0239 | 0.0290 | 2 | | 0.0146 | 0.0260 | 3 | ### Framework versions - Transformers 4.26.1 - TensorFlow 2.6.5 - Datasets 2.3.2 - Tokenizers 0.13.2
Davlan/bert-base-multilingual-cased-finetuned-luo
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
--- license: mit tags: - generated_from_trainer metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-de-fr results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-de-fr This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.1645 - F1: 0.8592 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.29 | 1.0 | 715 | 0.1809 | 0.8196 | | 0.1462 | 2.0 | 1430 | 0.1628 | 0.8484 | | 0.0936 | 3.0 | 2145 | 0.1645 | 0.8592 | ### Framework versions - Transformers 4.11.3 - Pytorch 2.0.0+cu118 - Datasets 1.16.1 - Tokenizers 0.10.3
Davlan/bert-base-multilingual-cased-finetuned-wolof
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2023-04-25T20:02:55Z
--- tags: - image-classification - timm library_name: timm license: bsd-3-clause datasets: - imagenet-1k --- # Model card for vgg11.tv_in1k A VGG image classification model. Trained on ImageNet-1k, original torchvision weights. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 132.9 - GMACs: 7.6 - Activations (M): 7.4 - Image size: 224 x 224 - **Papers:** - Very Deep Convolutional Networks for Large-Scale Image Recognition: https://arxiv.org/abs/1409.1556 - **Dataset:** ImageNet-1k - **Original:** https://github.com/pytorch/vision ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('vgg11.tv_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'vgg11.tv_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 224, 224]) # torch.Size([1, 128, 112, 112]) # torch.Size([1, 256, 56, 56]) # torch.Size([1, 512, 28, 28]) # torch.Size([1, 512, 14, 14]) # torch.Size([1, 512, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'vgg11.tv_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 512, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @article{Simonyan2014VeryDC, title={Very Deep Convolutional Networks for Large-Scale Image Recognition}, author={Karen Simonyan and Andrew Zisserman}, journal={CoRR}, year={2014}, volume={abs/1409.1556} } ```
Davlan/bert-base-multilingual-cased-finetuned-yoruba
[ "pytorch", "tf", "jax", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
21
null
--- tags: - image-classification - timm library_name: timm license: bsd-3-clause datasets: - imagenet-1k --- # Model card for vgg11_bn.tv_in1k A VGG image classification model. Trained on ImageNet-1k, original torchvision weights. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 132.9 - GMACs: 7.6 - Activations (M): 7.4 - Image size: 224 x 224 - **Papers:** - Very Deep Convolutional Networks for Large-Scale Image Recognition: https://arxiv.org/abs/1409.1556 - **Dataset:** ImageNet-1k - **Original:** https://github.com/pytorch/vision ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('vgg11_bn.tv_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'vgg11_bn.tv_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 224, 224]) # torch.Size([1, 128, 112, 112]) # torch.Size([1, 256, 56, 56]) # torch.Size([1, 512, 28, 28]) # torch.Size([1, 512, 14, 14]) # torch.Size([1, 512, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'vgg11_bn.tv_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 512, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @article{Simonyan2014VeryDC, title={Very Deep Convolutional Networks for Large-Scale Image Recognition}, author={Karen Simonyan and Andrew Zisserman}, journal={CoRR}, year={2014}, volume={abs/1409.1556} } ```
Davlan/bert-base-multilingual-cased-ner-hrl
[ "pytorch", "tf", "bert", "token-classification", "transformers", "autotrain_compatible", "has_space" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
269,898
null
--- tags: - image-classification - timm library_name: timm license: bsd-3-clause datasets: - imagenet-1k --- # Model card for vgg13.tv_in1k A VGG image classification model. Trained on ImageNet-1k, original torchvision weights. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 133.0 - GMACs: 11.3 - Activations (M): 12.3 - Image size: 224 x 224 - **Papers:** - Very Deep Convolutional Networks for Large-Scale Image Recognition: https://arxiv.org/abs/1409.1556 - **Dataset:** ImageNet-1k - **Original:** https://github.com/pytorch/vision ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('vgg13.tv_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'vgg13.tv_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 224, 224]) # torch.Size([1, 128, 112, 112]) # torch.Size([1, 256, 56, 56]) # torch.Size([1, 512, 28, 28]) # torch.Size([1, 512, 14, 14]) # torch.Size([1, 512, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'vgg13.tv_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 512, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @article{Simonyan2014VeryDC, title={Very Deep Convolutional Networks for Large-Scale Image Recognition}, author={Karen Simonyan and Andrew Zisserman}, journal={CoRR}, year={2014}, volume={abs/1409.1556} } ```
Davlan/byt5-base-eng-yor-mt
[ "pytorch", "t5", "text2text-generation", "arxiv:2103.08647", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
2023-04-25T20:06:43Z
--- tags: - generated_from_trainer model-index: - name: flan-t5-large-da-multiwoz2.0_400-ep11-nonstop results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # flan-t5-large-da-multiwoz2.0_400-ep11-nonstop This model was trained from scratch on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 0 ### Framework versions - Transformers 4.18.0 - Pytorch 1.12.1+cu102 - Datasets 1.9.0 - Tokenizers 0.12.1
Davlan/byt5-base-yor-eng-mt
[ "pytorch", "t5", "text2text-generation", "arxiv:2103.08647", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
2023-04-25T20:08:37Z
--- tags: - image-classification - timm library_name: timm license: bsd-3-clause datasets: - imagenet-1k --- # Model card for vgg13_bn.tv_in1k A VGG image classification model. Trained on ImageNet-1k, original torchvision weights. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 133.1 - GMACs: 11.3 - Activations (M): 12.3 - Image size: 224 x 224 - **Papers:** - Very Deep Convolutional Networks for Large-Scale Image Recognition: https://arxiv.org/abs/1409.1556 - **Dataset:** ImageNet-1k - **Original:** https://github.com/pytorch/vision ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('vgg13_bn.tv_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'vgg13_bn.tv_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 224, 224]) # torch.Size([1, 128, 112, 112]) # torch.Size([1, 256, 56, 56]) # torch.Size([1, 512, 28, 28]) # torch.Size([1, 512, 14, 14]) # torch.Size([1, 512, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'vgg13_bn.tv_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 512, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @article{Simonyan2014VeryDC, title={Very Deep Convolutional Networks for Large-Scale Image Recognition}, author={Karen Simonyan and Andrew Zisserman}, journal={CoRR}, year={2014}, volume={abs/1409.1556} } ```
Davlan/m2m100_418M-eng-yor-mt
[ "pytorch", "m2m_100", "text2text-generation", "arxiv:2103.08647", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "M2M100ForConditionalGeneration" ], "model_type": "m2m_100", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- tags: - image-classification - timm library_name: timm license: bsd-3-clause datasets: - imagenet-1k --- # Model card for vgg16.tv_in1k A VGG image classification model. Trained on ImageNet-1k, original torchvision weights. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 138.4 - GMACs: 15.5 - Activations (M): 13.6 - Image size: 224 x 224 - **Papers:** - Very Deep Convolutional Networks for Large-Scale Image Recognition: https://arxiv.org/abs/1409.1556 - **Dataset:** ImageNet-1k - **Original:** https://github.com/pytorch/vision ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('vgg16.tv_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'vgg16.tv_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 224, 224]) # torch.Size([1, 128, 112, 112]) # torch.Size([1, 256, 56, 56]) # torch.Size([1, 512, 28, 28]) # torch.Size([1, 512, 14, 14]) # torch.Size([1, 512, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'vgg16.tv_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 512, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @article{Simonyan2014VeryDC, title={Very Deep Convolutional Networks for Large-Scale Image Recognition}, author={Karen Simonyan and Andrew Zisserman}, journal={CoRR}, year={2014}, volume={abs/1409.1556} } ```
Davlan/m2m100_418M-yor-eng-mt
[ "pytorch", "m2m_100", "text2text-generation", "arxiv:2103.08647", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "M2M100ForConditionalGeneration" ], "model_type": "m2m_100", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
2023-04-25T20:12:23Z
--- tags: - image-classification - timm library_name: timm license: bsd-3-clause datasets: - imagenet-1k --- # Model card for vgg16_bn.tv_in1k A VGG image classification model. Trained on ImageNet-1k, original torchvision weights. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 138.4 - GMACs: 15.5 - Activations (M): 13.6 - Image size: 224 x 224 - **Papers:** - Very Deep Convolutional Networks for Large-Scale Image Recognition: https://arxiv.org/abs/1409.1556 - **Dataset:** ImageNet-1k - **Original:** https://github.com/pytorch/vision ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('vgg16_bn.tv_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'vgg16_bn.tv_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 224, 224]) # torch.Size([1, 128, 112, 112]) # torch.Size([1, 256, 56, 56]) # torch.Size([1, 512, 28, 28]) # torch.Size([1, 512, 14, 14]) # torch.Size([1, 512, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'vgg16_bn.tv_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 512, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @article{Simonyan2014VeryDC, title={Very Deep Convolutional Networks for Large-Scale Image Recognition}, author={Karen Simonyan and Andrew Zisserman}, journal={CoRR}, year={2014}, volume={abs/1409.1556} } ```
Davlan/mT5_base_yoruba_adr
[ "pytorch", "mt5", "text2text-generation", "arxiv:2003.10564", "arxiv:2103.08647", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MT5ForConditionalGeneration" ], "model_type": "mt5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- tags: - image-classification - timm library_name: timm license: bsd-3-clause datasets: - imagenet-1k --- # Model card for vgg19.tv_in1k A VGG image classification model. Trained on ImageNet-1k, original torchvision weights. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 143.7 - GMACs: 19.6 - Activations (M): 14.9 - Image size: 224 x 224 - **Papers:** - Very Deep Convolutional Networks for Large-Scale Image Recognition: https://arxiv.org/abs/1409.1556 - **Dataset:** ImageNet-1k - **Original:** https://github.com/pytorch/vision ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('vgg19.tv_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'vgg19.tv_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 224, 224]) # torch.Size([1, 128, 112, 112]) # torch.Size([1, 256, 56, 56]) # torch.Size([1, 512, 28, 28]) # torch.Size([1, 512, 14, 14]) # torch.Size([1, 512, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'vgg19.tv_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 512, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @article{Simonyan2014VeryDC, title={Very Deep Convolutional Networks for Large-Scale Image Recognition}, author={Karen Simonyan and Andrew Zisserman}, journal={CoRR}, year={2014}, volume={abs/1409.1556} } ```
Davlan/mbart50-large-eng-yor-mt
[ "pytorch", "mbart", "text2text-generation", "arxiv:2103.08647", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MBartForConditionalGeneration" ], "model_type": "mbart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
2023-04-25T20:15:06Z
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-fr results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme args: PAN-X.fr metrics: - name: F1 type: f1 value: 0.8068181818181819 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-fr This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.3224 - F1: 0.8068 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.7535 | 1.0 | 96 | 0.3975 | 0.7123 | | 0.3202 | 2.0 | 192 | 0.3297 | 0.8090 | | 0.2167 | 3.0 | 288 | 0.3224 | 0.8068 | ### Framework versions - Transformers 4.11.3 - Pytorch 2.0.0+cu118 - Datasets 1.16.1 - Tokenizers 0.10.3
Davlan/mt5-small-en-pcm
[ "pytorch", "mt5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MT5ForConditionalGeneration" ], "model_type": "mt5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
2023-04-25T20:15:58Z
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion - yayo-nfts thumbnail: "https://yayo.fund/img/yayo/logo_full/logo-pink.png" --- ### yayo-man-nft-model Make your own Yayo man. Use "yayo nft" in the prompt to enable the styling. If you just want to generate backgrounds in the style, try using "yayo man *place* background" (note that this uses "man", not "nft"). More at: https://yayo.fund/
Davlan/mt5-small-pcm-en
[ "pytorch", "mt5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MT5ForConditionalGeneration" ], "model_type": "mt5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- tags: - image-classification - timm library_name: timm license: bsd-3-clause datasets: - imagenet-1k --- # Model card for vgg19_bn.tv_in1k A VGG image classification model. Trained on ImageNet-1k, original torchvision weights. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 143.7 - GMACs: 19.7 - Activations (M): 14.9 - Image size: 224 x 224 - **Papers:** - Very Deep Convolutional Networks for Large-Scale Image Recognition: https://arxiv.org/abs/1409.1556 - **Dataset:** ImageNet-1k - **Original:** https://github.com/pytorch/vision ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('vgg19_bn.tv_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'vgg19_bn.tv_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 224, 224]) # torch.Size([1, 128, 112, 112]) # torch.Size([1, 256, 56, 56]) # torch.Size([1, 512, 28, 28]) # torch.Size([1, 512, 14, 14]) # torch.Size([1, 512, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'vgg19_bn.tv_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 512, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @article{Simonyan2014VeryDC, title={Very Deep Convolutional Networks for Large-Scale Image Recognition}, author={Karen Simonyan and Andrew Zisserman}, journal={CoRR}, year={2014}, volume={abs/1409.1556} } ```
Davlan/mt5_base_eng_yor_mt
[ "pytorch", "mt5", "text2text-generation", "arxiv:2103.08647", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MT5ForConditionalGeneration" ], "model_type": "mt5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- datasets: - xquad - xtreme - juletxara/xquad_xtreme language: - en - es pipeline_tag: question-answering --- # OtterChat 2 <!-- Provide a quick summary of what the model is/does. --> OtterChat 2 is a brand-new version of OtterChat with new features! ## Model Details * Multilingual * uses RoBERTa now ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [OtterDev](https://replit.com/@OtterDev) - **Model type:** Question Answering [Extractive] - **License:** [More Information Needed] - **Finetuned from model:** [roberta-base-squad2](https://huggingface.co/deepset/roberta-base-squad2) ### Model Sources <!-- Provide the basic links for the model. --> - **Demo:** https://replit.com/@OtterDev/OtterChat ## Uses This model can be used to extract data from text, such as an essay. ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> The main limitation of this model is that you <ins>NEED</ins> to have data in order for it to work. More will be posted when more limitations are found. Another limitation is that it is quite slow when translating languages. ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Select "Deploy", then select the Inference API to get started.
Davlan/mt5_base_yor_eng_mt
[ "pytorch", "mt5", "text2text-generation", "arxiv:2103.08647", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MT5ForConditionalGeneration" ], "model_type": "mt5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
2023-04-25T20:18:31Z
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-it results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme args: PAN-X.it metrics: - name: F1 type: f1 value: 0.8332647179909428 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-it This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.2442 - F1: 0.8333 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.8366 | 1.0 | 70 | 0.3126 | 0.7444 | | 0.2814 | 2.0 | 140 | 0.2561 | 0.8094 | | 0.1843 | 3.0 | 210 | 0.2442 | 0.8333 | ### Framework versions - Transformers 4.11.3 - Pytorch 2.0.0+cu118 - Datasets 1.16.1 - Tokenizers 0.10.3
Davlan/naija-twitter-sentiment-afriberta-large
[ "pytorch", "tf", "xlm-roberta", "text-classification", "arxiv:2201.08277", "transformers", "has_space" ]
text-classification
{ "architectures": [ "XLMRobertaForSequenceClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
61
2023-04-25T20:22:29Z
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 1387.05 +/- 203.84 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of a **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
Davlan/xlm-roberta-base-finetuned-english
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
2023-04-25T20:26:38Z
--- license: creativeml-openrail-m tags: - stablediffusionapi.com - stable-diffusion-api - text-to-image - ultra-realistic pinned: true --- # amireal API Inference ![generated from stablediffusionapi.com](https://pub-8b49af329fae499aa563997f5d4068a4.r2.dev/generations/3388458241682454332.png) ## Get API Key Get API key from [Stable Diffusion API](http://stablediffusionapi.com/), No Payment needed. Replace Key in below code, change **model_id** to "amireal" Coding in PHP/Node/Java etc? Have a look at docs for more code examples: [View docs](https://stablediffusionapi.com/docs) Model link: [View model](https://stablediffusionapi.com/models/amireal) Credits: [View credits](https://civitai.com/?query=amireal) View all models: [View Models](https://stablediffusionapi.com/models) import requests import json url = "https://stablediffusionapi.com/api/v3/dreambooth" payload = json.dumps({ "key": "", "model_id": "amireal", "prompt": "actual 8K portrait photo of gareth person, portrait, happy colors, bright eyes, clear eyes, warm smile, smooth soft skin, big dreamy eyes, beautiful intricate colored hair, symmetrical, anime wide eyes, soft lighting, detailed face, by makoto shinkai, stanley artgerm lau, wlop, rossdraws, concept art, digital painting, looking into camera", "negative_prompt": "painting, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, cloned face, skinny, glitchy, double torso, extra arms, extra hands, mangled fingers, missing lips, ugly face, distorted face, extra legs, anime", "width": "512", "height": "512", "samples": "1", "num_inference_steps": "30", "safety_checker": "no", "enhance_prompt": "yes", "seed": None, "guidance_scale": 7.5, "multi_lingual": "no", "panorama": "no", "self_attention": "no", "upscale": "no", "embeddings": "embeddings_model_id", "lora": "lora_model_id", "webhook": None, "track_id": None }) headers = { 'Content-Type': 'application/json' } response = requests.request("POST", url, headers=headers, data=payload) print(response.text) > Use this coupon code to get 25% off **DMGG0RBN**
Davlan/xlm-roberta-base-finetuned-hausa
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
234
2023-04-25T20:28:41Z
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1579615033901944833/ToYSD6TZ_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Young Thug ひ</div> <div style="text-align: center; font-size: 14px;">@youngthug</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Young Thug ひ. | Data | Young Thug ひ | | --- | --- | | Tweets downloaded | 3087 | | Retweets | 770 | | Short tweets | 669 | | Tweets kept | 1648 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/8wsvryqj/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @youngthug's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/d3cp83a8) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/d3cp83a8/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/youngthug') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Davlan/xlm-roberta-base-finetuned-igbo
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
68
2023-04-25T20:29:11Z
--- library_name: sample-factory tags: - deep-reinforcement-learning - reinforcement-learning - sample-factory model-index: - name: APPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: doom_health_gathering_supreme type: doom_health_gathering_supreme metrics: - type: mean_reward value: 12.22 +/- 4.54 name: mean_reward verified: false --- An **APPO** model trained on the **doom_health_gathering_supreme** environment. This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory. Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/ ## Downloading the model After installing Sample-Factory, download the model with: ``` python -m sample_factory.huggingface.load_from_hub -r khatkeashish/rl_course_vizdoom_health_gathering_supreme ``` ## Using the model To run the model after download, use the `enjoy` script corresponding to this environment: ``` python -m sf_examples.vizdoom.enjoy_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme ``` You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag. See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details. ## Training with this model To continue training with this model, use the `train` script corresponding to this environment: ``` python -m sf_examples.vizdoom.train_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000 ``` Note that you may have to adjust `--train_for_env_steps` to a suitably high number, as the experiment will resume at the number of steps it concluded at.
Davlan/xlm-roberta-base-ner-hrl
[ "pytorch", "xlm-roberta", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "XLMRobertaForTokenClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
760
2023-04-25T21:32:27Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - bleu model-index: - name: summeraiztion_t5base_en_to_kjven results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # summeraiztion_t5base_en_to_kjven This model is a fine-tuned version of [t5-base](https://huggingface.co/t5-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.7702 - Bleu: 23.612 - Gen Len: 18.1576 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:| | 1.0705 | 1.0 | 2860 | 0.9540 | 21.4263 | 18.131 | | 0.9753 | 2.0 | 5720 | 0.8850 | 22.278 | 18.1371 | | 0.9191 | 3.0 | 8580 | 0.8482 | 22.6985 | 18.1433 | | 0.8845 | 4.0 | 11440 | 0.8207 | 23.0513 | 18.146 | | 0.8654 | 5.0 | 14300 | 0.8015 | 23.2476 | 18.1499 | | 0.8443 | 6.0 | 17160 | 0.7891 | 23.4193 | 18.1525 | | 0.8175 | 7.0 | 20020 | 0.7820 | 23.5084 | 18.1548 | | 0.8192 | 8.0 | 22880 | 0.7741 | 23.538 | 18.1576 | | 0.8077 | 9.0 | 25740 | 0.7712 | 23.5967 | 18.1572 | | 0.8096 | 10.0 | 28600 | 0.7702 | 23.612 | 18.1576 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
Declan/Breitbart_modelv7
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for inception_resnet_v2.tf_ens_adv_in1k A Inception-ResNet-v2 image classification model. Adversarially (ensemble) trained on ImageNet-1k by paper authors. Ported from Tensorflow by Ross Wightman. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 55.8 - GMACs: 13.2 - Activations (M): 25.1 - Image size: 299 x 299 - **Papers:** - https://arxiv.org/abs/1602.07261: https://arxiv.org/abs/1602.07261 - Adversarial Attacks and Defences Competition: https://arxiv.org/abs/1804.00097 - **Original:** https://github.com/tensorflow/models - **Dataset:** ImageNet-1k ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('inception_resnet_v2.tf_ens_adv_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'inception_resnet_v2.tf_ens_adv_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 147, 147]) # torch.Size([1, 192, 71, 71]) # torch.Size([1, 320, 35, 35]) # torch.Size([1, 1088, 17, 17]) # torch.Size([1, 1536, 8, 8]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'inception_resnet_v2.tf_ens_adv_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 1536, 8, 8) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). 
## Citation ```bibtex @article{Szegedy2016Inceptionv4IA, title={Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning}, author={Christian Szegedy and Sergey Ioffe and Vincent Vanhoucke and Alexander A. Alemi}, journal={ArXiv}, year={2016}, volume={abs/1602.07261} } ``` ```bibtex @article{Kurakin2018AdversarialAA, title={Adversarial Attacks and Defences Competition}, author={Alexey Kurakin and Ian J. Goodfellow and Samy Bengio and Yinpeng Dong and Fangzhou Liao and Ming Liang and Tianyu Pang and Jun Zhu and Xiaolin Hu and Cihang Xie and Jianyu Wang and Zhishuai Zhang and Zhou Ren and Alan Loddon Yuille and Sangxia Huang and Yao Zhao and Yuzhe Zhao and Zhonglin Han and Junjiajia Long and Yerkebulan Berdibekov and Takuya Akiba and Seiya Tokui and Motoki Abe}, journal={ArXiv}, year={2018}, volume={abs/1804.00097} } ```
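A note on the image-classification snippet in the card above: it calls `torch.topk` but never imports `torch`. A self-contained version of that step, assuming the same checkpoint and test image, is sketched below:

```python
from urllib.request import urlopen

import timm
import torch
from PIL import Image

# Load the pretrained checkpoint and its matching preprocessing transforms.
model = timm.create_model('inception_resnet_v2.tf_ens_adv_in1k', pretrained=True).eval()
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

with torch.no_grad():
    output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

# torch.topk is why `import torch` is needed in addition to `import timm`.
top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
print(top5_class_indices)
```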
Declan/CNN_model_v6
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2023-04-25T21:48:11Z
--- license: cc-by-4.0 tags: - generated_from_trainer model-index: - name: deberta-v3-large-squad2-finetuned-squad results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # deberta-v3-large-squad2-finetuned-squad This model is a fine-tuned version of [deepset/deberta-v3-large-squad2](https://huggingface.co/deepset/deberta-v3-large-squad2) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
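The card above stops at the training hyperparameters; a minimal usage sketch with the `transformers` question-answering pipeline follows, where the repository name is a placeholder (the card does not state where the checkpoint is published):

```python
from transformers import pipeline

# Hypothetical repository name -- substitute the actual fine-tuned checkpoint.
qa = pipeline("question-answering", model="your-org/deberta-v3-large-squad2-finetuned-squad")

result = qa(
    question="Which base model was fine-tuned?",
    context="This model is a fine-tuned version of deepset/deberta-v3-large-squad2 on an unknown dataset.",
)
print(result["answer"], result["score"])
```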
Declan/HuffPost_model_v2
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
Don't be upsetti, here, have some spaghetti! Att: A'eala <3 ## Information GPT4-X-Alpasta-30b, working with Oobabooga's Text Generation Webui and KoboldAI. This is an attempt at improving Open Assistant's performance as an instruct model while retaining its excellent prose. The merge consists of [Chansung's GPT4-Alpaca LoRA](https://huggingface.co/chansung/gpt4-alpaca-lora-30b) and [Open Assistant's native fine-tune](https://huggingface.co/OpenAssistant/oasst-sft-6-llama-30b-xor). ## Benchmarks ### FP16 **Wikitext2**: 4.6077961921691895 **Ptb-New**: 9.41549301147461 **C4-New**: 6.98392915725708 Benchmarks brought to you by A'eala
Declan/NewYorkTimes_model_v8
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 253.43 +/- 16.39 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
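The usage section in the card above is left as a TODO. A minimal sketch that fills it in is below; the repo id and filename are placeholders, since the card does not name the repository the checkpoint was pushed to:

```python
import gym  # use `import gymnasium as gym` with stable-baselines3 >= 2.0
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# Hypothetical repo id and filename -- substitute whatever was used when the model was pushed.
checkpoint = load_from_hub(repo_id="your-user/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```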
Declan/Politico_model_v3
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- library_name: ml-agents tags: - Huggy - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy 2. Step 1: Find your model_id: qumingcheng/ppo-Huggy 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
Declan/Reuters_model_v4
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for pit_xs_224.in1k A PiT (Pooling based Vision Transformer) image classification model. Trained on ImageNet-1k by paper authors. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 10.6 - GMACs: 1.4 - Activations (M): 7.7 - Image size: 224 x 224 - **Papers:** - Rethinking Spatial Dimensions of Vision Transformers: https://arxiv.org/abs/2103.16302 - **Dataset:** ImageNet-1k - **Original:** https://github.com/naver-ai/pit ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('pit_xs_224.in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'pit_xs_224.in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 96, 27, 27]) # torch.Size([1, 192, 14, 14]) # torch.Size([1, 384, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'pit_xs_224.in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 1, 384) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @inproceedings{heo2021pit, title={Rethinking Spatial Dimensions of Vision Transformers}, author={Byeongho Heo and Sangdoo Yun and Dongyoon Han and Sanghyuk Chun and Junsuk Choe and Seong Joon Oh}, booktitle = {International Conference on Computer Vision (ICCV)}, year={2021}, } ```
DeepChem/ChemBERTa-77M-MTR
[ "pytorch", "roberta", "transformers" ]
null
{ "architectures": [ "RobertaForRegression" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7,169
null
--- tags: - LunarLander-v2 - ppo - deep-reinforcement-learning - reinforcement-learning - custom-implementation - deep-rl-course model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: -185.08 +/- 115.21 name: mean_reward verified: false --- # PPO Agent Playing LunarLander-v2 This is a trained model of a PPO agent playing LunarLander-v2. # Hyperparameters ```python {'exp_name': 'ppo' 'seed': 1 'torch_deterministic': True 'cuda': True 'track': False 'wandb_project_name': 'cleanRL' 'wandb_entity': None 'capture_video': False 'env_id': 'LunarLander-v2' 'total_timesteps': 50000 'learning_rate': 0.00025 'num_envs': 4 'num_steps': 128 'anneal_lr': True 'gae': True 'gamma': 0.99 'gae_lambda': 0.95 'num_minibatches': 4 'update_epochs': 4 'norm_adv': True 'clip_coef': 0.2 'clip_vloss': True 'ent_coef': 0.01 'vf_coef': 0.5 'max_grad_norm': 0.5 'target_kl': None 'repo_id': 'gaarsmu/LunarLander_PPO_from_scratch' 'batch_size': 512 'minibatch_size': 128} ```
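To make the `clip_coef`, `ent_coef`, and `vf_coef` hyperparameters above concrete, here is a minimal sketch of how they typically enter the clipped PPO objective in CleanRL-style code; the exact training script is not part of the card, so treat this as illustrative:

```python
import torch

def ppo_loss(ratio, advantages, entropy, v_loss, clip_coef=0.2, ent_coef=0.01, vf_coef=0.5):
    """Clipped surrogate objective combined with an entropy bonus and a scaled value loss."""
    pg_loss1 = -advantages * ratio
    pg_loss2 = -advantages * torch.clamp(ratio, 1 - clip_coef, 1 + clip_coef)
    pg_loss = torch.max(pg_loss1, pg_loss2).mean()
    entropy_loss = entropy.mean()
    # Total loss minimized by the optimizer: policy term, minus entropy bonus, plus value term.
    return pg_loss - ent_coef * entropy_loss + vf_coef * v_loss
```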
DeepChem/SmilesTokenizer_PubChem_1M
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
227
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - rouge model-index: - name: bangla-para-v5 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bangla-para-v5 This model is a fine-tuned version of [mHossain/bangla-para-v4](https://huggingface.co/mHossain/bangla-para-v4) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.0084 - Rouge1: 0.0 - Rouge2: 0.0 - Rougel: 0.0 - Rougelsum: 0.0 - Gen Len: 18.3058 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 5000 - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:|:-------:| | 1.3272 | 1.0 | 9000 | 1.0084 | 0.0 | 0.0 | 0.0 | 0.0 | 18.3058 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.3
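The card above reports metrics only; a minimal seq2seq inference sketch follows. The repository name is an assumption inferred from the base model's namespace, so substitute the real checkpoint:

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Hypothetical repository name -- substitute the actual checkpoint described by this card.
model_name = "mHossain/bangla-para-v5"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

inputs = tokenizer("এটি একটি উদাহরণ বাক্য।", return_tensors="pt")  # "This is an example sentence."
outputs = model.generate(**inputs, max_new_tokens=64, num_beams=4)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```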
DeepPavlov/bert-base-bg-cs-pl-ru-cased
[ "pytorch", "jax", "bert", "feature-extraction", "bg", "cs", "pl", "ru", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,614
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: ratish/DBERT_CleanDesc_Mode_v3.0 results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # ratish/DBERT_CleanDesc_Mode_v3.0 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.0369 - Validation Loss: 0.3284 - Train Accuracy: 0.9231 - Epoch: 8 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 3040, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Train Accuracy | Epoch | |:----------:|:---------------:|:--------------:|:-----:| | 0.6943 | 0.6928 | 0.5128 | 0 | | 0.6642 | 0.5426 | 0.8718 | 1 | | 0.4149 | 0.2977 | 0.9231 | 2 | | 0.2213 | 0.2287 | 0.9231 | 3 | | 0.1456 | 0.2031 | 0.9487 | 4 | | 0.1022 | 0.1724 | 0.9487 | 5 | | 0.0861 | 0.1957 | 0.9231 | 6 | | 0.0638 | 0.2111 | 0.9231 | 7 | | 0.0369 | 0.3284 | 0.9231 | 8 | ### Framework versions - Transformers 4.28.1 - TensorFlow 2.12.0 - Datasets 2.11.0 - Tokenizers 0.13.3
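The optimizer block above is a serialized Keras config; a rough reconstruction in code is sketched below. The base checkpoint and the two-class label count are assumptions, since the card does not state them:

```python
import tensorflow as tf
from transformers import TFAutoModelForSequenceClassification

# Hypothetical label count -- the card does not say how many classes were used.
model = TFAutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=2)

# Mirrors the serialized PolynomialDecay / Adam settings listed in the card.
lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=2e-5, decay_steps=3040, end_learning_rate=0.0, power=1.0, cycle=False)
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule, beta_1=0.9, beta_2=0.999, epsilon=1e-8)

# transformers TF models can be compiled without an explicit loss; the model's internal loss is used.
model.compile(optimizer=optimizer, metrics=["accuracy"])
```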
DeepPavlov/rubert-base-cased-conversational
[ "pytorch", "jax", "bert", "feature-extraction", "ru", "transformers", "has_space" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
17,362
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: ratish/DBERT_ZS_CleanCollision_v1.1 results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # ratish/DBERT_ZS_CleanCollision_v1.1 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.0709 - Validation Loss: 1.5936 - Train Accuracy: 0.5862 - Epoch: 9 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 9960, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Train Accuracy | Epoch | |:----------:|:---------------:|:--------------:|:-----:| | 0.9871 | 1.0630 | 0.3448 | 0 | | 0.8663 | 1.0509 | 0.4138 | 1 | | 0.6717 | 0.8617 | 0.5862 | 2 | | 0.4366 | 0.8978 | 0.6897 | 3 | | 0.2911 | 0.6636 | 0.7241 | 4 | | 0.2351 | 1.0674 | 0.6897 | 5 | | 0.1412 | 1.1587 | 0.6552 | 6 | | 0.0980 | 1.3062 | 0.6207 | 7 | | 0.0748 | 1.2605 | 0.6552 | 8 | | 0.0709 | 1.5936 | 0.5862 | 9 | ### Framework versions - Transformers 4.28.1 - TensorFlow 2.12.0 - Datasets 2.11.0 - Tokenizers 0.13.3
DeepPavlov/rubert-base-cased-sentence
[ "pytorch", "jax", "bert", "feature-extraction", "ru", "arxiv:1508.05326", "arxiv:1809.05053", "arxiv:1908.10084", "transformers", "has_space" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
46,991
null
--- library_name: ml-agents tags: - Huggy - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy 2. Step 1: Find your model_id: HurricaneSYG/ppo-Huggy 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
DeepPavlov/xlm-roberta-large-en-ru-mnli
[ "pytorch", "xlm-roberta", "text-classification", "en", "ru", "dataset:glue", "dataset:mnli", "transformers", "xlm-roberta-large", "xlm-roberta-large-en-ru", "xlm-roberta-large-en-ru-mnli", "has_space" ]
text-classification
{ "architectures": [ "XLMRobertaForSequenceClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
227
null
Based on https://huggingface.co/Fsoft-AIC/CodeCapybara. Quantized using https://github.com/qwopqwop200/GPTQ-for-LLaMa (triton branch) with: `python llama.py CodeCapybara/ c4 --wbits 4 --true-sequential --act-order --groupsize 128 --save_safetensors codecapybara-4bit-128g-gptq.safetensors`
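For loading the quantized weights afterwards, the same repository's `llama.py` also exposes a `--load` flag (e.g. `--load codecapybara-4bit-128g-gptq.safetensors` together with the same `--wbits 4 --groupsize 128` settings); this is an assumption based on the GPTQ-for-LLaMa README at the time and is worth re-checking against the triton branch you use.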
DeepPavlov/xlm-roberta-large-en-ru
[ "pytorch", "xlm-roberta", "feature-extraction", "en", "ru", "transformers" ]
feature-extraction
{ "architectures": [ "XLMRobertaModel" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
190
null
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1532799141008420865/Wjtu-Cea_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Project TXA</div> <div style="text-align: center; font-size: 14px;">@projecttxa</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Project TXA. | Data | Project TXA | | --- | --- | | Tweets downloaded | 2223 | | Retweets | 299 | | Short tweets | 586 | | Tweets kept | 1338 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/lar1fo8i/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @projecttxa's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/xste5pe0) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/xste5pe0/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/projecttxa') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Denny29/DialoGPT-medium-asunayuuki
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- tags: - zero-shot-image-classification - clip library_name: open_clip license: mit --- # Model card for CLIP-ViT-B-32-CommonPool.S.text-s13M-b4K
DeskDown/MarianMixFT_en-fil
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- tags: - zero-shot-image-classification - clip library_name: open_clip license: mit --- # Model card for CLIP-ViT-B-32-CommonPool.S.laion-s13M-b4K
DeskDown/MarianMixFT_en-hi
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2023-04-26T01:30:49Z
--- tags: - zero-shot-image-classification - clip library_name: open_clip license: mit --- # Model card for CLIP-ViT-B-32-CommonPool.S.clip-s13M-b4K
DeskDown/MarianMixFT_en-id
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- tags: - zero-shot-image-classification - clip library_name: open_clip license: mit --- # Model card for CLIP-ViT-B-32-DataComp.S-s13M-b4K
DeskDown/MarianMixFT_en-ja
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- tags: - zero-shot-image-classification - clip library_name: open_clip license: mit --- # Model card for CLIP-ViT-B-32-CommonPool.M-s128M-b4K
DeskDown/MarianMixFT_en-my
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- tags: - zero-shot-image-classification - clip library_name: open_clip license: mit --- # Model card for CLIP-ViT-B-32-CommonPool.M.basic-s128M-b4K
DeskDown/MarianMixFT_en-th
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- tags: - zero-shot-image-classification - clip library_name: open_clip license: mit --- # Model card for CLIP-ViT-B-32-CommonPool.M.text-s128M-b4K
DeskDown/MarianMixFT_en-vi
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- tags: - zero-shot-image-classification - clip library_name: open_clip license: mit --- # Model card for CLIP-ViT-B-32-CommonPool.M.image-s128M-b4K
DeskDown/MarianMix_en-ja-10
[ "pytorch", "tensorboard", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- tags: - zero-shot-image-classification - clip library_name: open_clip license: mit --- # Model card for CLIP-ViT-B-32-CommonPool.M.laion-s128M-b4K
DeskDown/MarianMix_en-zh-10
[ "pytorch", "tensorboard", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- tags: - zero-shot-image-classification - clip library_name: open_clip license: mit --- # Model card for CLIP-ViT-B-32-CommonPool.M.clip-s128M-b4K
DeskDown/MarianMix_en-zh_to_vi-ms-hi-ja
[ "pytorch", "tensorboard", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- tags: - zero-shot-image-classification - clip library_name: open_clip license: mit --- # Model card for CLIP-ViT-B-32-DataComp.M-s128M-b4K
Despin89/test
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - zero-shot-image-classification - clip library_name: open_clip license: mit --- # Model card for CLIP-ViT-B-16-CommonPool.L-s1B-b8K
Dev-DGT/food-dbert-multiling
[ "pytorch", "distilbert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
17
null
--- tags: - zero-shot-image-classification - clip library_name: open_clip license: mit --- # Model card for CLIP-ViT-B-16-CommonPool.L.basic-s1B-b8K
Devid/DialoGPT-small-Miku
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 272.12 +/- 20.58 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
Devmapall/paraphrase-quora
[ "pytorch", "jax", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
3
null
--- tags: - zero-shot-image-classification - clip library_name: open_clip license: mit --- # Model card for CLIP-ViT-B-16-CommonPool.L.text-s1B-b8K
Dhritam/Zova-bot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - zero-shot-image-classification - clip library_name: open_clip license: mit --- # Model card for CLIP-ViT-B-16-DataComp.L-s1B-b8K
Dilmk2/DialoGPT-small-harrypotter
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- license: mit widget: - src: >- https://huggingface.co/datasets/mishig/sample_images/resolve/main/cat-dog-music.png candidate_labels: playing music, playing sports example_title: Cat & Dog library_name: open_clip datasets: - mlfoundations/datacomp_pools pipeline_tag: zero-shot-image-classification --- # Model card for CLIP ViT-L-14 trained DataComp-1B # Table of Contents 1. [Model Details](#model-details) 2. [Uses](#uses) 3. [Training Details](#training-details) 4. [Evaluation](#evaluation) 5. [Acknowledgements](#acknowledgements) 6. [Citation](#citation) 7. [How To Get Started With the Model](#how-to-get-started-with-the-model) # Model Details ## Model Description A CLIP ViT-L/14 model trained with the DataComp-1B (https://github.com/mlfoundations/datacomp) using OpenCLIP (https://github.com/mlfoundations/open_clip). Model training done on the [stability.ai](https://stability.ai/) cluster. # Uses As per the original [OpenAI CLIP model card](https://github.com/openai/CLIP/blob/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1/model-card.md), this model is intended as a research output for research communities. We hope that this model will enable researchers to better understand and explore zero-shot, arbitrary image classification. We also hope it can be used for interdisciplinary studies of the potential impact of such model. The OpenAI CLIP paper includes a discussion of potential downstream impacts to provide an example for this sort of analysis. Additionally, the DataComp paper (https://arxiv.org/abs/2304.14108) include additional discussion as it relates specifically to the training dataset. ## Direct Use Zero-shot image classification, image and text retrieval, among others. ## Downstream Use Image classification and other image task fine-tuning, linear probe image classification, image generation guiding and conditioning, among others. ## Out-of-Scope Use As per the OpenAI models, **Any** deployed use case of the model - whether commercial or not - is currently out of scope. Non-deployed use cases such as image search in a constrained environment, are also not recommended unless there is thorough in-domain testing of the model with a specific, fixed class taxonomy. This is because our safety assessment demonstrated a high need for task specific testing especially given the variability of CLIP’s performance with different class taxonomies. This makes untested and unconstrained deployment of the model in any use case currently potentially harmful. Certain use cases which would fall under the domain of surveillance and facial recognition are always out-of-scope regardless of performance of the model. This is because the use of artificial intelligence for tasks such as these can be premature currently given the lack of testing norms and checks to ensure its fair use. # Training Details ## Training Data This model was trained with the 1.4 Billion samples of the DataComp-1B dataset (https://arxiv.org/abs/2304.14108). **IMPORTANT NOTE:** The motivation behind dataset creation is to democratize research and experimentation around large-scale multi-modal model training and handling of uncurated, large-scale datasets crawled from publically available internet. Our recommendation is therefore to use the dataset for research purposes. Be aware that this large-scale dataset is uncurated. Keep in mind that the uncurated nature of the dataset means that collected links may lead to strongly discomforting and disturbing content for a human viewer. 
Therefore, please use the demo links with caution and at your own risk. It is possible to extract a “safe” subset by filtering out samples based on the safety tags (using a custom-trained NSFW classifier that we built). While this strongly reduces the chance of encountering potentially harmful content when viewing, we cannot entirely exclude the possibility that harmful content is still present in safe mode, so the warning holds there as well. We think that providing the dataset openly to the broad research and other interested communities will allow for transparent investigation of the benefits that come with training large-scale models, as well as of the pitfalls and dangers that may stay unreported or unnoticed when working with closed large datasets that remain restricted to a small community. Although we provide our dataset openly, we do not recommend using it to create ready-to-go industrial products, as the basic research into the general properties and safety of such large-scale models, which we would like to encourage with this release, is still in progress.

## Training Procedure

Please see https://arxiv.org/abs/2304.14108.

# Evaluation

Evaluation was done on 38 datasets, using the [DataComp repo](https://github.com/mlfoundations/datacomp) and the [LAION CLIP Benchmark](https://github.com/LAION-AI/CLIP_benchmark).

## Testing Data, Factors & Metrics

### Testing Data

The testing is performed on a suite of 38 datasets. See our paper for more details (https://arxiv.org/abs/2304.14108).

## Results

The model achieves a 79.2% zero-shot top-1 accuracy on ImageNet-1k. See our paper for more details and results (https://arxiv.org/abs/2304.14108).

# Acknowledgements

We acknowledge [stability.ai](https://stability.ai/) for the compute used to train this model.

# Citation

**BibTeX:**

DataComp
```bibtex
@article{datacomp,
  title={DataComp: In search of the next generation of multimodal datasets},
  author={Samir Yitzhak Gadre, Gabriel Ilharco, Alex Fang, Jonathan Hayase, Georgios Smyrnis, Thao Nguyen, Ryan Marten, Mitchell Wortsman, Dhruba Ghosh, Jieyu Zhang, Eyal Orgad, Rahim Entezari, Giannis Daras, Sarah Pratt, Vivek Ramanujan, Yonatan Bitton, Kalyani Marathe, Stephen Mussmann, Richard Vencu, Mehdi Cherti, Ranjay Krishna, Pang Wei Koh, Olga Saukh, Alexander Ratner, Shuran Song, Hannaneh Hajishirzi, Ali Farhadi, Romain Beaumont, Sewoong Oh, Alex Dimakis, Jenia Jitsev, Yair Carmon, Vaishaal Shankar, Ludwig Schmidt},
  journal={arXiv preprint arXiv:2304.14108},
  year={2023}
}
```

OpenAI CLIP paper
```bibtex
@inproceedings{Radford2021LearningTV,
  title={Learning Transferable Visual Models From Natural Language Supervision},
  author={Alec Radford and Jong Wook Kim and Chris Hallacy and A. Ramesh and Gabriel Goh and Sandhini Agarwal and Girish Sastry and Amanda Askell and Pamela Mishkin and Jack Clark and Gretchen Krueger and Ilya Sutskever},
  booktitle={ICML},
  year={2021}
}
```

OpenCLIP software
```bibtex
@software{ilharco_gabriel_2021_5143773,
  author       = {Ilharco, Gabriel and Wortsman, Mitchell and Wightman, Ross and Gordon, Cade and Carlini, Nicholas and Taori, Rohan and Dave, Achal and Shankar, Vaishaal and Namkoong, Hongseok and Miller, John and Hajishirzi, Hannaneh and Farhadi, Ali and Schmidt, Ludwig},
  title        = {OpenCLIP},
  month        = jul,
  year         = 2021,
  note         = {If you use this software, please cite it as below.},
  publisher    = {Zenodo},
  version      = {0.1},
  doi          = {10.5281/zenodo.5143773},
  url          = {https://doi.org/10.5281/zenodo.5143773}
}
```

# How to Get Started with the Model

See https://github.com/mlfoundations/open_clip.
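As a supplement to the link above, here is a minimal zero-shot classification sketch following the standard OpenCLIP usage pattern. The repository id, image path, and candidate labels are placeholders and assumptions, not values taken from this card.

```python
# Minimal zero-shot classification sketch with OpenCLIP.
# The repo id and image path below are placeholders, not values from this card.
import torch
from PIL import Image
import open_clip

repo = "hf-hub:<this-model-repo-id>"  # substitute this model's actual Hub id
model, _, preprocess = open_clip.create_model_and_transforms(repo)
tokenizer = open_clip.get_tokenizer(repo)
model.eval()

image = preprocess(Image.open("cat.jpg")).unsqueeze(0)        # a single-image batch
text = tokenizer(["a cat", "a dog", "a musical instrument"])  # illustrative candidate labels

with torch.no_grad():
    image_features = model.encode_image(image)
    text_features = model.encode_text(text)
    # Normalize so the dot product is a cosine similarity
    image_features /= image_features.norm(dim=-1, keepdim=True)
    text_features /= text_features.norm(dim=-1, keepdim=True)
    probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)

print("Label probabilities:", probs)
```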
Dmitriiserg/Pxd
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: other pipeline_tag: tabular-classification tags: - finance - code metrics: - f1 - recall - precision language: - en datasets: - marcilioduarte/german_credit_risk ---

# This is a Gradio app based on a credit risk prediction model.

## Want to work on a project together, or are you interested in my services? Reach me:

LinkedIn: https://www.linkedin.com/in/marcilioduarte98/

GitHub: https://github.com/marcilioduarte

@marcilioduarte | Economics and Data Science
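The card gives no usage example, so here is a purely hypothetical scoring sketch. The artifact file name, model format (a pickled scikit-learn pipeline), and feature columns below are assumptions for illustration only, not files or fields documented in this repository.

```python
# Hypothetical sketch: scoring one applicant with a saved tabular credit-risk pipeline.
# File name, format, and feature columns are assumptions, not part of this repo.
import joblib
import pandas as pd

model = joblib.load("credit_risk_model.joblib")  # assumed scikit-learn pipeline artifact

applicant = pd.DataFrame([{
    "age": 35,
    "credit_amount": 2500,
    "duration_months": 24,
    "housing": "own",
    "purpose": "car",
}])  # illustrative subset of German-Credit-style features

risk_label = model.predict(applicant)[0]
risk_confidence = model.predict_proba(applicant)[0].max()
print(f"Predicted risk class: {risk_label} (confidence {risk_confidence:.2f})")
```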
DongHyoungLee/kogpt2-base-v2-finetuned-kogpt2_nsmc_single_sentence_classification
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - zh license: mit tags: - 1.1.0 - generated_from_trainer datasets: - facebook/voxpopuli model-index: - name: SpeechT5 TTS Dutch neunit results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # SpeechT5 TTS Dutch neunit This model is a fine-tuned version of [microsoft/speecht5_tts](https://huggingface.co/microsoft/speecht5_tts) on the VoxPopuli dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 4000 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.29.0.dev0 - Pytorch 2.0.0+cu117 - Datasets 2.11.0 - Tokenizers 0.12.1
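Since the usage sections above are still marked "More information needed", here is a hedged inference sketch using the standard 🤗 Transformers SpeechT5 API. The checkpoint id, input sentence, and speaker-embedding source and index are illustrative assumptions, not values documented in this card; a fine-tune would normally use a speaker embedding that matches its training speakers.

```python
# Hedged TTS inference sketch for a SpeechT5 fine-tune (checkpoint id, text, and
# speaker embedding choice are illustrative assumptions, not values from this card).
import torch
import soundfile as sf
from datasets import load_dataset
from transformers import SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan

checkpoint = "<this-checkpoint-id>"  # placeholder for this fine-tuned repo id
processor = SpeechT5Processor.from_pretrained(checkpoint)
model = SpeechT5ForTextToSpeech.from_pretrained(checkpoint)
vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")

inputs = processor(text="Hello, this is a test sentence.", return_tensors="pt")

# A generic x-vector speaker embedding (index chosen arbitrarily for illustration)
embeddings = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embeddings = torch.tensor(embeddings[7306]["xvector"]).unsqueeze(0)

speech = model.generate_speech(inputs["input_ids"], speaker_embeddings, vocoder=vocoder)
sf.write("tts_output.wav", speech.numpy(), samplerate=16000)
```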
DoyyingFace/bert-asian-hate-tweets-asian-unclean-warmup-100
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
null
--- library_name: ml-agents tags: - Pyramids - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Pyramids ---

# **ppo** Agent playing **Pyramids**
This is a trained model of a **ppo** agent playing **Pyramids** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents).

## Usage (with ML-Agents)
The Documentation: https://github.com/huggingface/ml-agents#get-started
We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub.

### Resume the training
```
mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume
```

### Watch your Agent play
You can watch your agent **play directly in your browser**:
1. Go to https://huggingface.co/spaces/unity/ML-Agents-Pyramids
2. Find your model_id: Isaac009/ppo-Pyramid
3. Select your *.nn or *.onnx file
4. Click on Watch the agent play 👀
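If you prefer to fetch the trained agent files programmatically rather than through the browser demo, here is a hedged sketch using `huggingface_hub`. The local directory name is arbitrary, and the repo id is the one listed in step 2 above.

```python
# Hedged sketch: download this repository's files (including the exported policy)
# so they can be used with the ML-Agents toolkit or the browser demo above.
from huggingface_hub import snapshot_download

local_path = snapshot_download(
    repo_id="Isaac009/ppo-Pyramid",        # repo id as listed in step 2 above
    local_dir="./downloads/ppo-Pyramid",   # arbitrary local directory (assumption)
)
print("Model files downloaded to:", local_path)
```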