Dataset schema (from the dataset viewer header):

| Column | Type | Range / classes |
|:--|:--|:--|
| modelId | string | lengths 4–81 |
| tags | list | |
| pipeline_tag | string | 17 classes |
| config | dict | |
| downloads | int64 | 0–59.7M |
| first_commit | timestamp[ns, tz=UTC] | |
| card | string | lengths 51–438k |
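A minimal sketch of loading and inspecting a dataset with this schema via the `datasets` library. The repository id below is a placeholder (the preview does not name the underlying dataset), so treat it as an assumption:

```python
# Hypothetical example: "user/model-cards-dump" is a placeholder repo id,
# since the preview above does not name the underlying dataset.
from datasets import load_dataset

ds = load_dataset("user/model-cards-dump", split="train")
print(ds.features)  # modelId, tags, pipeline_tag, config, downloads, first_commit, card
row = ds[0]
print(row["modelId"], row["pipeline_tag"], row["downloads"])
```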
DeskDown/MarianMixFT_en-vi
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- tags: - generated_from_trainer model-index: - name: results results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # results This model is a fine-tuned version of [Rostlab/prot_bert_bfd](https://huggingface.co/Rostlab/prot_bert_bfd) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 0 - gradient_accumulation_steps: 128 - total_train_batch_size: 2048 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Tokenizers 0.13.3
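The hyperparameter list in the card above maps directly onto `transformers.TrainingArguments`. A minimal sketch, assuming the standard `Trainer` API (the output directory is taken from the model-index name; the dataset is unspecified in the card):

```python
from transformers import TrainingArguments

# Sketch: the card's training hyperparameters expressed as TrainingArguments.
args = TrainingArguments(
    output_dir="results",             # assumed from the model-index name
    learning_rate=5e-5,
    per_device_train_batch_size=16,   # 16 * 128 accumulation steps = 2048 effective batch
    per_device_eval_batch_size=8,
    seed=0,
    gradient_accumulation_steps=128,
    lr_scheduler_type="linear",
    num_train_epochs=10,
    fp16=True,                        # "Native AMP" mixed-precision training
)
```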
DeskDown/MarianMix_en-ja-10
[ "pytorch", "tensorboard", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
2023-05-01T04:10:07Z
--- datasets: - gozfarb/ShareGPT_Vicuna_unfiltered --- # Converter https://github.com/practicaldreamer/fastchat-conversation-converter # Trainer https://github.com/oobabooga/text-generation-webui This LoRA is a work in progress; consider it pre-alpha.
DeskDown/MarianMix_en-zh-10
[ "pytorch", "tensorboard", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: other --- Mirror of LLaMA 7B and 13B's quantization from Sparsebit. The author asserts NO RIGHTS WHATSOEVER. All rights belong to their respective owners. I do not own any of this content.
Despin89/test
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1559522455382855681/SBC0WX5Z_400x400.png&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Nansen Portfolio</div> <div style="text-align: center; font-size: 14px;">@nansenportfolio</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Nansen Portfolio. | Data | Nansen Portfolio | | --- | --- | | Tweets downloaded | 2117 | | Retweets | 98 | | Short tweets | 226 | | Tweets kept | 1793 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/lp1s6lkg/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @nansenportfolio's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/uc47wqfz) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/uc47wqfz/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/nansenportfolio') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Dev-DGT/food-dbert-multiling
[ "pytorch", "distilbert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
17
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-Taxi-v3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.52 +/- 2.73 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python model = load_from_hub(repo_id="cyrodw/q-Taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
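The usage snippet above calls `load_from_hub` without defining it. A minimal sketch of that helper as it is typically written in the Hugging Face Deep RL course notebooks (an assumption; the card itself does not include the definition):

```python
import pickle
from huggingface_hub import hf_hub_download

def load_from_hub(repo_id: str, filename: str) -> dict:
    """Download a pickled Q-learning model from the Hub and unpickle it."""
    local_path = hf_hub_download(repo_id=repo_id, filename=filename)
    with open(local_path, "rb") as f:
        return pickle.load(f)
```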
Devid/DialoGPT-small-Miku
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- tags: - generated_from_trainer model-index: - name: outputs results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # outputs This model is a fine-tuned version of [jl2010/outputs](https://huggingface.co/jl2010/outputs) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.003 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Author | Date | Image | Other | Paragraph | Title | Overall Precision | Overall Recall | Overall F1 | Overall Accuracy | |:-------------:|:-----:|:----:|:---------------:|:----------------------------------------------------------:|:----------------------------------------------------------:|:----------------------------------------------------------:|:------------------------------------------------------------:|:-----------------------------------------------------------------------------------------:|:----------------------------------------------------------:|:-----------------:|:--------------:|:----------:|:----------------:| | No log | 1.0 | 125 | nan | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 21} | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 88} | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 96} | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 1839} | {'precision': 0.2006172839506173, 'recall': 1.0, 'f1': 0.3341902313624679, 'number': 520} | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 28} | 0.2006 | 0.2006 | 0.2006 | 0.2006 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.1+cu117 - Datasets 2.8.0 - Tokenizers 0.13.2
DiegoAlysson/opus-mt-en-ro-finetuned-en-to-ro
[ "pytorch", "tensorboard", "marian", "text2text-generation", "dataset:wmt16", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
2023-05-01T04:37:59Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb metrics: - accuracy - f1 model-index: - name: finetuning-sentiment-model-3000-samples results: - task: name: Text Classification type: text-classification dataset: name: imdb type: imdb config: plain_text split: test args: plain_text metrics: - name: Accuracy type: accuracy value: 0.8533333333333334 - name: F1 type: f1 value: 0.8552631578947368 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning-sentiment-model-3000-samples This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.3081 - Accuracy: 0.8533 - F1: 0.8553 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
Dimedrolza/DialoGPT-small-cyberpunk
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
2023-05-01T04:54:16Z
--- license: creativeml-openrail-m base_model: CompVis/stable-diffusion-v1-4 instance_prompt: a photo of sks dog tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers - lora inference: true --- # LoRA DreamBooth - sayakpaul/dreambooth-text-encoder-test These are LoRA adaptation weights for CompVis/stable-diffusion-v1-4. The weights were trained on a photo of sks dog using [DreamBooth](https://dreambooth.github.io/). You can find some example images below. ![img_0](./image_0.png) ![img_1](./image_1.png) ![img_2](./image_2.png) ![img_3](./image_3.png) LoRA for the text encoder was enabled: True.
Dmitry12/sber
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-Cartpole-v1-r2 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1**. To learn to use this model and train your own, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
DongHyoungLee/distilbert-base-uncased-finetuned-cola
[ "pytorch", "tensorboard", "distilbert", "text-classification", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: jikkyjohn/qa_model_distill_bert_squad results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # jikkyjohn/qa_model_distill_bert_squad This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 3.4480 - Validation Loss: 2.2621 - Epoch: 0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': False, 'is_legacy_optimizer': False, 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 500, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Epoch | |:----------:|:---------------:|:-----:| | 3.4480 | 2.2621 | 0 | ### Framework versions - Transformers 4.28.1 - TensorFlow 2.12.0 - Datasets 2.12.0 - Tokenizers 0.13.3
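The serialized optimizer config in the card above can be reconstructed with standard Keras classes. A sketch, assuming TensorFlow's built-in schedule and optimizer (power=1.0 makes the polynomial decay linear):

```python
import tensorflow as tf

# Rebuild the card's optimizer: Adam with a linear decay from 2e-5 to 0 over 500 steps.
lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=2e-5,
    decay_steps=500,
    end_learning_rate=0.0,
    power=1.0,
)
optimizer = tf.keras.optimizers.Adam(
    learning_rate=lr_schedule, beta_1=0.9, beta_2=0.999, epsilon=1e-8
)
```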
Waynehillsdev/Wayne_NLP_mT5
[ "pytorch", "tensorboard", "mt5", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MT5ForConditionalGeneration" ], "model_type": "mt5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
2023-05-01T07:29:06Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - bleu model-index: - name: opus-mt-finetuned-id-to-su results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # opus-mt-finetuned-id-to-su This model is a fine-tuned version of [hermanshid/opus-mt-finetuned-id-to-su](https://huggingface.co/hermanshid/opus-mt-finetuned-id-to-su) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.2138 - Bleu: 30.2906 - Gen Len: 61.8136 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:| | 1.6062 | 1.0 | 2500 | 1.4921 | 25.6912 | 62.7002 | | 1.4351 | 2.0 | 5000 | 1.3702 | 27.6125 | 62.2532 | | 1.3364 | 3.0 | 7500 | 1.3116 | 28.515 | 62.4072 | | 1.2696 | 4.0 | 10000 | 1.2821 | 29.279 | 61.8128 | | 1.2245 | 5.0 | 12500 | 1.2532 | 29.6096 | 61.631 | | 1.1812 | 6.0 | 15000 | 1.2373 | 29.8749 | 61.9136 | | 1.1589 | 7.0 | 17500 | 1.2309 | 29.8342 | 62.038 | | 1.1268 | 8.0 | 20000 | 1.2186 | 30.0483 | 61.9544 | | 1.1055 | 9.0 | 22500 | 1.2158 | 30.3904 | 61.5692 | | 1.1035 | 10.0 | 25000 | 1.2138 | 30.2906 | 61.8136 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
Doogie/Waynehills-KE-T5-doogie
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: EinsZwo/en-to-ar_coref_words results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # EinsZwo/en-to-ar_coref_words This model is a fine-tuned version of [Helsinki-NLP/opus-mt-en-ar](https://huggingface.co/Helsinki-NLP/opus-mt-en-ar) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 1.7615 - Validation Loss: 2.0233 - Epoch: 2 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 5e-05, 'decay_steps': 19626, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: mixed_float16 ### Training results | Train Loss | Validation Loss | Epoch | |:----------:|:---------------:|:-----:| | 2.1489 | 2.0516 | 0 | | 1.9031 | 2.0309 | 1 | | 1.7615 | 2.0233 | 2 | ### Framework versions - Transformers 4.28.1 - TensorFlow 2.12.0 - Datasets 2.12.0 - Tokenizers 0.13.3
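The `AdamWeightDecay` config in the card above matches what `transformers.create_optimizer` produces on the TensorFlow side. A sketch of rebuilding it (an assumption; the card only records the serialized config):

```python
from transformers import create_optimizer

# Reconstruct the card's optimizer: polynomial (linear) decay from 5e-5 to 0
# over 19626 steps, with a 0.01 weight decay rate and no warmup.
optimizer, lr_schedule = create_optimizer(
    init_lr=5e-5,
    num_train_steps=19626,
    num_warmup_steps=0,
    weight_decay_rate=0.01,
)
```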
Doohae/q_encoder
[ "pytorch" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
There are several ways to convert MSG files to the EML file format, but the DataVare MSG to EML conversion application is an effective choice that can swiftly convert a single MSG file at a time. Numerous email clients are supported, including Thunderbird, Apple Mail, eM Client, IncrediMail, etc. DataVare MSG to EML Converter Software lets you convert an unlimited amount of data, error-free and without interruption, while producing accurate results for your MSG to EML files. It supports all MS Outlook versions, including 2019, 2016, 2013, 2010, 2007, 2003, and 2000. Users are free to select the destination path according to their needs, allowing them to quickly and efficiently export MSG to a variety of formats without having to hunt around the system. The MSG to EML converter runs easily on Windows 10 and earlier. Simply download the trial version to explore all the features and capabilities; no technical assistance is required to use this utility. It is an excellent method for converting MSG to EML files. After trying the free demo, purchase the licensed version at a reasonable price. Even vast quantities of files can be managed with a single click thanks to this time-saving program. Read more: https://www.datavare.com/software/msg-to-eml-converter-expert.html
Doohae/roberta
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- language: - mn license: mit tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: bloom-NER-fr results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bloom-NER-fr This model is a fine-tuned version of [roberta-large-mnli](https://huggingface.co/roberta-large-mnli) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.2930 - Precision: 0.5423 - Recall: 0.6361 - F1: 0.5854 - Accuracy: 0.9004 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 6 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.7569 | 1.0 | 47 | 0.4836 | 0.3709 | 0.3924 | 0.3813 | 0.8604 | | 0.4348 | 2.0 | 94 | 0.3771 | 0.4395 | 0.5443 | 0.4863 | 0.8687 | | 0.3607 | 3.0 | 141 | 0.3232 | 0.5115 | 0.6086 | 0.5559 | 0.8953 | | 0.2913 | 4.0 | 188 | 0.2918 | 0.5527 | 0.6255 | 0.5868 | 0.8974 | | 0.2602 | 5.0 | 235 | 0.2835 | 0.5485 | 0.6445 | 0.5926 | 0.9028 | | 0.2332 | 6.0 | 282 | 0.2930 | 0.5423 | 0.6361 | 0.5854 | 0.9004 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
DoyyingFace/bert-asian-hate-tweets-asian-unclean-freeze-12
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-de results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme config: PAN-X.de split: validation args: PAN-X.de metrics: - name: F1 type: f1 value: 0.8653353814644136 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-de This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.1339 - F1: 0.8653 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.2583 | 1.0 | 525 | 0.1596 | 0.8231 | | 0.1262 | 2.0 | 1050 | 0.1395 | 0.8468 | | 0.0824 | 3.0 | 1575 | 0.1339 | 0.8653 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
DoyyingFace/bert-asian-hate-tweets-asian-unclean-freeze-4
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
44
null
--- license: creativeml-openrail-m base_model: runwayml/stable-diffusion-v1-5 tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers - controlnet - jax-diffusers-event inference: true --- # controlnet - ParityError/ControlNet-Shadows These are ControlNet weights trained on runwayml/stable-diffusion-v1-5 with a new type of conditioning. You can find some example images below. prompt: a hollow wooden box and ribbed green ball in the air with shadows on the ground, light direction west+32, light elevation 25 ![images_0](./images_0.png) prompt: a hollow green cube and silver planet in the sky and metallic wedge with shadows on the ground, light direction east+34, light elevation 25 ![images_1](./images_1.png) prompt: a tree house with a rope ladder, light direction nw, light elevation 25 ![images_2](./images_2.png)
DoyyingFace/bert-asian-hate-tweets-asian-unclean-freeze-8
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
2023-05-01T05:51:10Z
--- language: - mn tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: roberta-base-ner-demo results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base-ner-demo This model is a fine-tuned version of [bayartsogt/mongolian-roberta-base](https://huggingface.co/bayartsogt/mongolian-roberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.1263 - Precision: 0.9352 - Recall: 0.9416 - F1: 0.9384 - Accuracy: 0.9817 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.161 | 1.0 | 477 | 0.0722 | 0.9132 | 0.9248 | 0.9190 | 0.9786 | | 0.052 | 2.0 | 954 | 0.0732 | 0.9211 | 0.9353 | 0.9282 | 0.9797 | | 0.028 | 3.0 | 1431 | 0.0802 | 0.9280 | 0.9354 | 0.9317 | 0.9804 | | 0.015 | 4.0 | 1908 | 0.0954 | 0.9190 | 0.9324 | 0.9257 | 0.9791 | | 0.0101 | 5.0 | 2385 | 0.0978 | 0.9312 | 0.9385 | 0.9348 | 0.9809 | | 0.0055 | 6.0 | 2862 | 0.1072 | 0.9315 | 0.9392 | 0.9353 | 0.9810 | | 0.0035 | 7.0 | 3339 | 0.1165 | 0.9313 | 0.9392 | 0.9352 | 0.9807 | | 0.0026 | 8.0 | 3816 | 0.1223 | 0.9338 | 0.9403 | 0.9371 | 0.9812 | | 0.002 | 9.0 | 4293 | 0.1234 | 0.9341 | 0.9398 | 0.9369 | 0.9813 | | 0.0009 | 10.0 | 4770 | 0.1263 | 0.9352 | 0.9416 | 0.9384 | 0.9817 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
DoyyingFace/bert-asian-hate-tweets-asian-unclean-slanted
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
The WholeClear NSF to MSG Converter Software is a secure way to convert NSF files into the MSG file format. The program converts NSF files of any size into MSG format. Both technical and non-technical users can easily convert NSF files to MSG. The converted MSG files can be saved to a specific location of your choice on your computer. The application entirely preserves all email properties, including To, CC, BCC, From, Subject, Date, and attachments. A notable feature is the preview facility, which lets every user inspect the entire database before conversion. To learn more about how the program performs the conversion, try the free demo version, which lets you convert a few items from each folder. The utility works with all MS Outlook versions, such as 2003, 2007, 2010, 2013, 2016, and 2019, and runs on all Windows operating system versions, including 7, 8, 8.1, 10, XP, and Vista. Read more: https://www.wholeclear.com/nsf/msg/
DoyyingFace/bert-asian-hate-tweets-asian-unclean-warmup-100
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
2023-05-01T05:55:31Z
--- tags: - image-classification - pytorch - huggingpics metrics: - accuracy model-index: - name: whatracket results: - task: name: Image Classification type: image-classification metrics: - name: Accuracy type: accuracy value: 0.4157303273677826 --- # whatracket Autogenerated by HuggingPics🤗🖼️ Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb). Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics). ## Example Images #### badminton racket ![badminton racket](images/badminton_racket.jpg) #### squash racket ![squash racket](images/squash_racket.jpg) #### table tennis racket ![table tennis racket](images/table_tennis_racket.jpg) #### tennis racket ![tennis racket](images/tennis_racket.jpg)
DoyyingFace/bert-asian-hate-tweets-asian-unclean-warmup-25
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
--- license: creativeml-openrail-m base_model: runwayml/stable-diffusion-v1-5 tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers - textual_inversion inference: true --- # Textual inversion text2image fine-tuning - anic87/textual_inversion_well These are textual inversion adaptation weights for runwayml/stable-diffusion-v1-5. You can find some example images below. ![img_0](./image_0.png) ![img_1](./image_1.png) ![img_2](./image_2.png) ![img_3](./image_3.png)
DoyyingFace/bert-asian-hate-tweets-asian-unclean-warmup-50
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
null
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1** . ## Usage ```python model = load_from_hub(repo_id="Fillemon/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ```
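The usage snippet above also calls `evaluate_agent`, which is left undefined in the card. A hedged sketch of that helper as it appears in the Deep RL course materials, assuming the classic `gym` API where `reset` returns the state and `step` returns a 4-tuple:

```python
import numpy as np

def evaluate_agent(env, max_steps, n_eval_episodes, qtable, eval_seed):
    """Run the greedy policy for n_eval_episodes and return mean/std of rewards."""
    episode_rewards = []
    for episode in range(n_eval_episodes):
        # Assumes the classic gym API: reset() returns the state directly.
        state = env.reset(seed=eval_seed[episode]) if eval_seed else env.reset()
        total_reward = 0
        for _ in range(max_steps):
            action = int(np.argmax(qtable[state]))     # greedy action from the Q-table
            state, reward, done, _ = env.step(action)  # classic 4-tuple step API
            total_reward += reward
            if done:
                break
        episode_rewards.append(total_reward)
    return np.mean(episode_rewards), np.std(episode_rewards)
```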
DoyyingFace/bert-asian-hate-tweets-asian-unclean-warmup-75
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
37
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-Taxi-v3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.52 +/- 2.77 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3** . ## Usage ```python model = load_from_hub(repo_id="Fillemon/q-Taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ```
DoyyingFace/bert-asian-hate-tweets-asian-unclean-with-clean-valid
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
33
null
--- license: apache-2.0 tags: - summarization - generated_from_trainer datasets: - wiki_lingua metrics: - rouge model-index: - name: wiki_lingua-es-8-3-5.6e-05-mt5-small-finetuned results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: wiki_lingua type: wiki_lingua config: es split: test args: es metrics: - name: Rouge1 type: rouge value: 22.4103 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wiki_lingua-es-8-3-5.6e-05-mt5-small-finetuned This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on the wiki_lingua dataset. It achieves the following results on the evaluation set: - Loss: 2.0242 - Rouge1: 22.4103 - Rouge2: 9.2461 - Rougel: 19.4105 - Rougelsum: 21.758 # Baseline LEAD-64 - Rouge1: 25.16 - Rouge2: 7.28 - Rougel: 16.23 - Rougelsum: 16.23 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5.6e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:------:|:-------:|:---------:| | 2.8922 | 1.0 | 9537 | 2.1216 | 21.4907 | 8.3405 | 18.2727 | 20.7713 | | 2.4024 | 2.0 | 19074 | 2.0520 | 22.2765 | 9.1257 | 19.2788 | 21.608 | | 2.3131 | 3.0 | 28611 | 2.0242 | 22.4103 | 9.2461 | 19.4105 | 21.758 | ### Framework versions - Transformers 4.27.4 - Pytorch 1.13.0 - Datasets 2.1.0 - Tokenizers 0.13.2
DoyyingFace/bert-asian-hate-tweets-asonam-clean
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="ngyewkong/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
DoyyingFace/bert-asian-hate-tweets-asonam-unclean
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: unit2-Taxi3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.48 +/- 2.71 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python model = load_from_hub(repo_id="barry0121/unit2-Taxi3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
DoyyingFace/bert-asian-hate-tweets-concat-clean-with-unclean-valid
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
25
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: Taxi-v3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.48 +/- 2.81 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python model = load_from_hub(repo_id="ngyewkong/Taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
DoyyingFace/bert-asian-hate-tweets-concat-clean
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
25
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: multilingual_1_5 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # multilingual_1_5 This model is a fine-tuned version of [bert-base-multilingual-cased](https://huggingface.co/bert-base-multilingual-cased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.4579 - Accuracy: 0.43 - F1: 0.1480 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
albert-base-v2
[ "pytorch", "tf", "jax", "rust", "safetensors", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4,785,283
2023-05-01T06:08:27Z
--- license: bigscience-bloom-rail-1.0 datasets: - OpenAssistant/oasst1 - RyokoAI/ShareGPT52K - Dahoas/full-hh-rlhf - liswei/rm-static-m2m100-zh - fnlp/moss-002-sft-data language: - zh - en --- This is an attempt to replicate the RLHF pipeline. ### Base Model We used [bloomz-7b1-mt](https://huggingface.co/bigscience/bloomz-7b1-mt) because of its less restrictive license and multilingual ability. ### Supervised Finetune For SFT we used a combination of multiple datasets, including: - [RyokoAI/ShareGPT52K](https://huggingface.co/datasets/RyokoAI/ShareGPT52K) - [GPTeacher](https://github.com/teknium1/GPTeacher) - [Alpaca-GPT4](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM) en & zh - A filtered subset of the ShareGPT dataset machine-translated into Chinese ### Reward Model For RM we used the code of the [reward-modeling](https://github.com/Dahoas/reward-modeling) repo and datasets from - [oasst1](https://huggingface.co/datasets/OpenAssistant/oasst1) - [Dahoas/full-hh-rlhf](https://huggingface.co/datasets/Dahoas/full-hh-rlhf) - [liswei/rm-static-m2m100-zh](https://huggingface.co/datasets/liswei/rm-static-m2m100-zh) ### Reinforcement Learning For RL we used the code of [trlx](https://github.com/CarperAI/trlx) with slight modifications. Instead of building the value network upon the policy network with a single linear layer, we add another hydra head upon the reference network's frozen bottom layers as the value network. ### Example We used the Vicuna v1.1 template for model training ``` from transformers import AutoModelForCausalLM, AutoTokenizer checkpoint = "keyfan/bloomz-rlhf" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = AutoModelForCausalLM.from_pretrained(checkpoint).cuda() template = ("A chat between a curious human and an artificial intelligence assistant. " "The assistant gives helpful, detailed, and polite answers to the human's questions. " "USER: {}\nASSISTANT:") question = template.format("Who was the president of the United States in 1955?") inputs = tokenizer.encode(question, return_tensors="pt").cuda() outputs = model.generate(inputs, do_sample=True, top_p=0.8, max_new_tokens=512) print(tokenizer.decode(outputs[0])) ``` ### Evaluations Results on the Chinese [BELLE eval set](https://github.com/LianjiaTech/BELLE/tree/main/eval) | others | rewrite | classification | generation | summarization | extract | open qa | brainstorming | closed qa | macro ave | macro ave w/o others | | ---- | ---- | ---- | ---- | ---- | ---- | ---- | ---- | ---- | ---- | ---- | | 0.619 | 0.873 | 0.706 | 0.934 | 0.755 | 0.619 | 0.527 | 0.908 | 0.615 | 0.728 | 0.742 | * We found that in GPT-4 evaluation, the order in which the responses were presented has a non-negligible effect on the final score, even with the very well designed Vicuna prompt. So we removed the score on the Vicuna eval set.
bert-base-chinese
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "zh", "arxiv:1810.04805", "transformers", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3,377,486
2023-05-01T06:26:09Z
--- license: mit tags: - generated_from_keras_callback model-index: - name: jikkyjohn/roberta-base-finetuned-squad results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # jikkyjohn/roberta-base-finetuned-squad This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.6641 - Train End Logits Accuracy: 0.8101 - Train Start Logits Accuracy: 0.7677 - Validation Loss: 0.8159 - Validation End Logits Accuracy: 0.7731 - Validation Start Logits Accuracy: 0.7410 - Epoch: 1 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 22142, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Train End Logits Accuracy | Train Start Logits Accuracy | Validation Loss | Validation End Logits Accuracy | Validation Start Logits Accuracy | Epoch | |:----------:|:-------------------------:|:---------------------------:|:---------------:|:------------------------------:|:--------------------------------:|:-----:| | 1.0348 | 0.7257 | 0.6841 | 0.8335 | 0.7701 | 0.7375 | 0 | | 0.6641 | 0.8101 | 0.7677 | 0.8159 | 0.7731 | 0.7410 | 1 | ### Framework versions - Transformers 4.28.1 - TensorFlow 2.12.0 - Datasets 2.12.0 - Tokenizers 0.13.3
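As a usage note, a checkpoint like this can be queried with the `question-answering` pipeline. This is a hedged sketch that assumes the fine-tuned weights are pushed under the model-index name above:

```python
from transformers import pipeline

# Assumes the weights live under this repo id (taken from the model-index name above).
qa = pipeline("question-answering", model="jikkyjohn/roberta-base-finetuned-squad")
result = qa(
    question="What base model was fine-tuned?",
    context="This checkpoint is a fine-tuned version of roberta-base on a SQuAD-style dataset.",
)
print(result["answer"], result["score"])
```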
bert-large-cased-whole-word-masking
[ "pytorch", "tf", "jax", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2,316
2023-05-01T06:37:59Z
--- license: creativeml-openrail-m base_model: runwayml/stable-diffusion-v1-5 instance_prompt: a photo of sks well-differentiated-adenocarcinoma tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers - dreambooth inference: true --- # DreamBooth - anic87/well This is a dreambooth model derived from runwayml/stable-diffusion-v1-5. The weights were trained on a photo of sks well-differentiated-adenocarcinoma using [DreamBooth](https://dreambooth.github.io/). You can find some example images in the following. DreamBooth for the text encoder was enabled: False.
bert-large-uncased-whole-word-masking
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
76,685
2023-05-01T06:47:13Z
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: sayakpcode/bert-finetuned-squad results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # sayakpcode/bert-finetuned-squad This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.5726 - Epoch: 2 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 16635, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: mixed_float16 ### Training results | Train Loss | Epoch | |:----------:|:-----:| | 1.2725 | 0 | | 0.7804 | 1 | | 0.5726 | 2 | ### Framework versions - Transformers 4.28.1 - TensorFlow 2.12.0 - Datasets 2.12.0 - Tokenizers 0.13.3
ctrl
[ "pytorch", "tf", "ctrl", "en", "arxiv:1909.05858", "arxiv:1910.09700", "transformers", "license:bsd-3-clause", "has_space" ]
null
{ "architectures": null, "model_type": "ctrl", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
17,007
2023-05-01T06:59:31Z
**Product Name** - [DeRose Health Ageless II](https://derose-health-ageless-ii-capsules.company.site/) **Category** - Anti-Aging Capsules **Ingredients** - Ginger, Phyllanthus, Burdock Root, Milk thistle, Artichoke and Beetroot **Refund Policy** - 90-Days Money Back Guarantee **Availability** - Only On [Official Website](https://www.healthsupplement24x7.com/get-ageless-II) **Official Website** - [https://www.healthsupplement24x7.com/get-ageless-ii](https://www.healthsupplement24x7.com/get-ageless-ii) **What is DeRose Health Ageless II?** ===================================== Your search for a product that shields your skin and nourishes it is over. [DeRose Health Ageless II](https://www.scoop.it/topic/derose-health-ageless-ii-reviews-by-derose-health-ageless-ii-reviews) is a new product, available now, to aid your daily skincare routine. [DeRose Health Ageless II](https://soundcloud.com/mirandaderma/derose-health-ageless-ii-capsules-improvement-in-fine-lines-coarse-wrinkles-dark-spots) contains only plant-based ingredients in its formulation to avoid any adverse consequences. The ingredients include milk thistle, Chanca Piedra, vitamins, chicory root, yarrow flower and zinc oxide, in addition to many others. These ingredients have all been tested scientifically and have been shown to help improve the well-being of your skin. [![](https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEij7oakr2h3kAgNRoj2OKkc0zkdSQy1ZhhgXqNtDWIZOs16lLSHD9YIY0v45jJzNw3aZkBXURLx7--4gM8LKBFWvdTiZxpJQDYxJKAvKMiv2i6FS0AWMtYupQlEPVbUj7YnEZBL1ozBzA6t6ISHGxBYRe-uQ-ZlhiMbn2wsWyMgUaO0tlM_IZVEuH19Hg/w640-h482/Screenshot%20(2486).png)](https://www.healthsupplement24x7.com/get-ageless-ii) **[CLICK HERE To Buy This Product From The Official Website (Limited Stock)](https://www.healthsupplement24x7.com/get-ageless-ll)** -------------------------------------------------------------------------------------------------------------------------- **How Does DeRose Health Ageless II Improve Skin Health?** ===================================================== [DeRose Health Ageless II](https://www.ivoox.com/derose-health-ageless-ii-capsules-improvement-in-fine-audios-mp3_rf_107229274_1.html) contains several vitamins and minerals in its composition that are rich in antioxidants and antibacterial properties to promote the quality of your skin. The formula of this supplement reduces inflammation and promotes collagen production to offer you a youthful appearance. It can also remove harmful toxins from your skin and body so that you maintain proper well-being. It can protect your skin from wrinkles and aging and even boost liver function with the help of 100% natural ingredients. **[Visit the Official Website of DeRose Health Ageless II](https://www.healthsupplement24x7.com/get-ageless-ll)** ----------------------------------------------------------------------------------------------------------------- **Benefits Of DeRose Health Ageless II** =========================================== To summarize the multiple benefits mentioned above, these pills offer many benefits to the skin: **Prevents Wrinkles:** The collagen-boosting properties present in these supplements reduce the appearance of fine lines and wrinkles. **Treats Acne:** With ingredients containing anti-bacterial properties, these supplements help in the treatment of acne caused by bacteria. **Lowers Inflammation:** The presence of antioxidants helps in fighting off free radicals. This lowers inflammation and reduces redness and swelling. 
**Targets Hyperpigmentation:** Hyperpigmentation is common because of exposure to the sun. To treat it, these supplements use Beetroot, which ends up brightening the skin. **Lightens Acne Scars:** Acne scars are notorious for leaving deep pits and marks behind. This can, however, be treated with these supplements to give you clear, healthy skin. [![](https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEi5jXCsRfA4wb3SR_MB-gTYP6Yzu_l2XE7OW85lYNGYZuK5kTY1PgG783aSUDcO3xW6_BRS5rZJhtCaOlr326mv4FcTUBgg1lmLZCf6LkNgOnMCvaipdzjz9n0kYgbZX6d8SmfDnuGqUrrUXsqZcLgtGoIhOA1115X-VLTITWx4RdAcywoCfl2HMuVi8g/w640-h408/Screenshot%20(2487).png)](https://www.healthsupplement24x7.com/get-ageless-ii) **[Learn More About These Ingredients From The Official Website](https://www.healthsupplement24x7.com/get-ageless-ll)** ----------------------------------------------------------------------------------------------------------------------- **DeRose Health Ageless II Main Ingredients** ============================================= This is a list of some of the main ingredients used in [DeRose Health Ageless II](https://www.prlog.org/12962449-derose-health-ageless-ii-capsules-improvement-in-fine-lines-coarse-wrinkles-dark-spots.html): **Beetroot:** Not only is this great for your health, lowering your blood pressure and blood sugar, but it is also widely used to prevent early signs of aging and to combat problems such as wrinkles and acne. **Chanca Piedra:** While it's most commonly used to treat kidney stones, this plant has formidable antioxidant properties, which protect you from harmful free radicals. This means that the cells in your body, including the skin cells, will be protected from harm. **Artichoke Leaves:** This is very good for both your liver and skin. It has superior nutrients, promotes the natural growth of cells, and regenerates damage that has already happened. Also, it's effective in detoxing you from harmful substances that may cause disease. **Milk Thistle:** Your skin will look great after ingesting this, as it has the full package: antibacterial, anti-inflammatory, and antiviral support. No matter what is harming your skin, the infection will be completely obliterated with the use of this supplement. **Yarrow Flower:** Like other ingredients on this list, this potent flower has anti-inflammatory and antioxidant properties that will help your body to heal fairly quickly. When combined with the other ingredients, it will have a powerful effect on you. **Vitamin C:** The vitamin C found in citrus fruit has been proven to prevent sunburns as well as reduce inflammation. It also boosts collagen production, keeping your skin elastic and firm. Vitamin C is an antioxidant, which protects against free radicals. Free radicals are harmful substances that damage DNA and cells. 
[![](https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgsVhWH4iMsxu2v7uDHECY5KM7y0cB48d-3fKGjJ3XpVWeue_M7uAmZaN0vcWdrBqg8w95U_JoWAS-m9adYE6ZDiQuRnlGUCR4WRc882LG6W2F2CttBsTLiZXOt_ZPsU1FKsnnKxYGJkk2JxJQj363lCZ9LPHJQXIExq1aj1kPA8FFhsK9EKrPEjLM8Qg/w640-h350/Screenshot%20(2488).png)](https://www.healthsupplement24x7.com/get-ageless-ii) **[\*(Special Discounted Pricing Available For A Limited Time Only)](https://www.healthsupplement24x7.com/get-ageless-ll)** --------------------------------------------------------------------------------------------------------------------------- **DeRose Health Ageless II Dosage** ==================================== Each container of the DeRose Health Ageless II supplement holds 60 capsules, or 30 servings. This means that one bottle is enough for a one-month supply, as the dose is two capsules daily. Dr. Ally Ray has also shared the best time to follow this daily oral skincare routine: she recommends taking the daily dose 20 to 30 minutes before a meal. **Pricing Of DeRose Health Ageless II** ======================================= Right now, consumers can go to the official [DeRose Health Ageless II](https://www.yepdesk.com/derose-health-ageless-ii-capsules-improvement-in-fine-lines-coarse-wrinkles-dark-spots-) website, where they will find discounts, a 60-day money-back guarantee, and free shipping inside the US. There are no extra fees or subscription memberships to purchase: * One Bottle **$59** + Shipping * Two Bottles **$49/each** \+ Shipping * Five Bottles **$35/each** Free Shipping [![](https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjqL1m6a47MGcv6B3rG5JavoHXEqZO67IvZ0weQYis4ZH-dyeUpUkvyYWb4RvleI8SflAelhorAV4m1eJiBNq1pLnkNfz0LgrnxUbcT0BlsTmVo20soBAm2_WLEkQsRcYCr2bESUDilCbohtIyCmxbafQWQhTBmOJXEBL8x4VmiiAG-RkmgP09CA5_Jmg/w640-h526/Screenshot%20(2489).png)](https://www.healthsupplement24x7.com/get-ageless-ii) **[ORDER DERMA PRIME SUPPLEMENT FROM ITS OFFICIAL WEBSITE](https://www.healthsupplement24x7.com/get-ageless-ll)** ----------------------------------------------------------------------------------------------------------------- **DeRose Health Ageless II Shipping and Money-Back Policy** =========================================================== The makers of the DeRose Health Ageless II skincare capsules offer free shipping on every package. The company offers 3 packages, and none of them charges for shipping. It comes with a 90-day money-back guarantee: if you dislike or aren't satisfied with the supplement, you can return it and get a refund as soon as possible. You can enjoy a hassle-free return with this money-back policy. **Where to Buy DeRose Health Ageless II?** ========================================== The supplement is only available online, but luckily the company ships worldwide. So, you can easily place your order for the DeRose Health Ageless II supplement through its official website from any corner of the world. **Conclusion** =============== Overall, as the Derma Prime Plus reviews note, there is no doubt that natural products improve your skin’s health and appearance from the inside out. Derma Prime Plus is a 100% natural supplement that you can use to maintain healthy and moisturized skin. Indeed, it is the best choice you can have to support your youthfulness. Ultimately, for the best possible control of any adverse side effects, we strongly recommend that you use this product as instructed. 
[![](https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhkUfAUcX3ajywYir9C0g46qX3O49d7w3b0qz60LcgX7EzfnJS588ZHusCrM-J02sCdduTmCbc2GjEttHVX6Kuj17YI8ibXcVm6sPLABoawgBnxGRtM4RJkEu0Qi9h9N7yAd6NHYAZ1UVYrTEpGki8gLSVShKN1oJM8fI5KNqwW_r8CNwY-C3uMiD-Ysw/w640-h344/Screenshot%20(2490).png)](https://www.healthsupplement24x7.com/get-ageless-ii) [Click Here To Order DeRose Health Ageless II Supplement From The Official Website](https://www.healthsupplement24x7.com/get-ageless-ll) ---------------------------------------------------------------------------------------------------------------------------------------- [https://derose-health-ageless-ii-capsules.company.site/](https://derose-health-ageless-ii-capsules.company.site/) [https://healthsupplements24x7.blogspot.com/2023/04/derose-health-ageless-ii.html](https://healthsupplements24x7.blogspot.com/2023/04/derose-health-ageless-ii.html) [https://www.scoop.it/topic/derose-health-ageless-ii-reviews-by-derose-health-ageless-ii-reviews](https://www.scoop.it/topic/derose-health-ageless-ii-reviews-by-derose-health-ageless-ii-reviews) [https://soundcloud.com/mirandaderma/derose-health-ageless-ii-capsules-improvement-in-fine-lines-coarse-wrinkles-dark-spots](https://soundcloud.com/mirandaderma/derose-health-ageless-ii-capsules-improvement-in-fine-lines-coarse-wrinkles-dark-spots) [https://www.ivoox.com/derose-health-ageless-ii-capsules-improvement-in-fine-audios-mp3\_rf\_107229274\_1.html](https://www.ivoox.com/derose-health-ageless-ii-capsules-improvement-in-fine-audios-mp3_rf_107229274_1.html) [https://www.prlog.org/12962449-derose-health-ageless-ii-capsules-improvement-in-fine-lines-coarse-wrinkles-dark-spots.html](https://www.prlog.org/12962449-derose-health-ageless-ii-capsules-improvement-in-fine-lines-coarse-wrinkles-dark-spots.html) [https://www.yepdesk.com/derose-health-ageless-ii-capsules-improvement-in-fine-lines-coarse-wrinkles-dark-spots-](https://www.yepdesk.com/derose-health-ageless-ii-capsules-improvement-in-fine-lines-coarse-wrinkles-dark-spots-)
distilbert-base-cased-distilled-squad
[ "pytorch", "tf", "rust", "safetensors", "openvino", "distilbert", "question-answering", "en", "dataset:squad", "arxiv:1910.01108", "arxiv:1910.09700", "transformers", "license:apache-2.0", "model-index", "autotrain_compatible", "has_space" ]
question-answering
{ "architectures": [ "DistilBertForQuestionAnswering" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
257,745
2023-05-01T07:01:45Z
--- license: other --- The sample images use the prompt: 1girl Negative prompt: (bad quality:1.4),(low quality:1.4) How it was made 01: Strengthen Anything's painting style 325 2023.05.01 14.38.01 Anything-V3.0 + (Anything-V3.0 - v1-5-pruned) x alpha (0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.35,0.35,0.35,0.35,0.35,0.35 "0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0.35,0.35,0.35,0.35,0.35,0.35 " 0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2 8712e20a5d 8712e20a5d e1441589a6 0 0.25 Add difference:A+(B-C)*alpha TRUE 1 ['safetensors', 'overwrite', 'fp16', 'save metadata', 'save model'] ['image'] normal ![00187-3869891558-1girl.png](https://s3.amazonaws.com/moonup/production/uploads/630742d668bd6e30ddab5e44/sp5oHQD7b5i-_z1kq-Gjk.png) 02: It felt like we overdid it, so we dial it back 335 2023.05.01 14.53.34 01fp16 + (Anything-V3.0 - v1-5-pruned) x 0 b04662a8f6 8712e20a5d e1441589a6 0 0.25 Add difference:A+(B-C)*alpha FALSE 2 ['safetensors', 'overwrite', 'fp16', 'save metadata', 'save model'] ['image'] OUT04:transformer_blocks.0.attn2.to_q.weight:-1,OUT05:transformer_blocks.0.attn2.to_q.weight:-1,OUT03:transformer_blocks.0.attn2.to_q.weight:-1,IN07:transformer_blocks.0.attn2.to_q.weight:-1,IN08:transformer_blocks.0.attn2.to_q.weight:-1 normal ![00188-3727227439-1girl.png](https://s3.amazonaws.com/moonup/production/uploads/630742d668bd6e30ddab5e44/WGkVdWI72h_cvfLQNme0d.png) 03: The anatomy had become distorted, so we restore the deep layers 350 2023.05.01 15.20.04 02fp16 x 1 + v1-5-pruned x 0 8fd5caf2d7 e1441589a6 e1441589a6 0 0.25 Weight sum:A*(1-alpha)+B*alpha FALSE 4 ['safetensors', 'overwrite', 'fp16', 'save metadata', 'save model'] ['image'] OUT11:skip_connection:0.75,OUT10:skip_connection:0.75,OUT09:skip_connection:0.75,IN11 M00 OUT00 OUT01 OUT02:layers:0.75 normal ![00189-350641963-1girl.png](https://s3.amazonaws.com/moonup/production/uploads/630742d668bd6e30ddab5e44/_7NDBEwU5gBqc4s-PWtyx.png) 04: It didn't restore well, so we restore it once more 351 2023.05.01 15.20.42 03fp16 x 1 + v1-5-pruned x 0 05d8ffcfcc e1441589a6 e1441589a6 0 0.25 Weight sum:A*(1-alpha)+B*alpha FALSE 4 ['safetensors', 'overwrite', 'fp16', 'save metadata', 'save model'] ['image'] OUT11:skip_connection:0.75,OUT10:skip_connection:0.75,OUT09:skip_connection:0.75,IN11 M00 OUT00 OUT01 OUT02:layers:0.75 normal ![00190-3430932122-1girl.png](https://s3.amazonaws.com/moonup/production/uploads/630742d668bd6e30ddab5e44/1ZVMLuKHYUvedWoaHITL6.png) 05: We wanted the painting style closer to 01, so we blend toward it with cosineB 352 2023.05.01 15.34.03 04fp16 x 0.5 + 01fp16 x 0.5 59d991b7f3 b04662a8f6 e1441589a6 0.5 0.25 Weight sum:A*(1-alpha)+B*alpha FALSE 5 ['safetensors', 'overwrite', 'fp16', 'save metadata', 'save model'] ['image'] cosineB ![00191-1915990718-1girl.png](https://s3.amazonaws.com/moonup/production/uploads/630742d668bd6e30ddab5e44/cDa1KB8WmkOe-tqs-kClh.png) 06: Attempting to stabilize the anatomy via BASE. It probably has no effect 362 2023.05.01 15.44.09 05fp16 x (1-alpha) + v1-5-pruned x alpha (0.05,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0) 0.05,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2,0.2 5284dd1e4e e1441589a6 e1441589a6 0.5 0.25 Weight sum:A*(1-alpha)+B*alpha TRUE 6 ['safetensors', 'overwrite', 'fp16', 'save metadata', 'save model'] ['image'] normal ![00192-1467933202-1girl.png](https://s3.amazonaws.com/moonup/production/uploads/630742d668bd6e30ddab5e44/VnOqlJ-mAwCiZ30oViw5F.png) After some detours, it is complete. 
![00193-3875850622-1girl.png](https://s3.amazonaws.com/moonup/production/uploads/630742d668bd6e30ddab5e44/OSxCwCHLl7O83U7O-TuU-.png) ![00194-3577506229-1girl.png](https://s3.amazonaws.com/moonup/production/uploads/630742d668bd6e30ddab5e44/NUQhkMFbm488Dcjlf9UqP.png) ![00195-1647193101-1girl.png](https://s3.amazonaws.com/moonup/production/uploads/630742d668bd6e30ddab5e44/Hv383P1NieQ2pt_0qHEPt.png) Honestly, stopping at step 01 and calling it finished would have been fine too. Using negative prompts should make it more stable.
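For readers unfamiliar with the merge modes named in the recipe above, they reduce to simple per-tensor arithmetic over matching checkpoint weights. This is only an illustrative sketch of those two formulas, not the merge script that was actually used:

```python
import torch

def weight_sum(a: torch.Tensor, b: torch.Tensor, alpha: float) -> torch.Tensor:
    # Weight sum: A*(1-alpha) + B*alpha
    return a * (1 - alpha) + b * alpha

def add_difference(a: torch.Tensor, b: torch.Tensor, c: torch.Tensor, alpha: float) -> torch.Tensor:
    # Add difference: A + (B-C)*alpha
    return a + (b - c) * alpha

# In practice these run per layer, with a different alpha per U-Net block —
# that is what the long comma-separated alpha lists in the recipe encode.
```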
distilbert-base-german-cased
[ "pytorch", "safetensors", "distilbert", "fill-mask", "de", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
43,667
2023-05-01T07:04:35Z
--- tags: - autotrain - vision - image-classification datasets: - nandodeomkar/autotrain-data-bone-fracture-detection widget: - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg example_title: Tiger - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg example_title: Teapot - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg example_title: Palace co2_eq_emissions: emissions: 0.007494454669184296 --- # Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 54370127369 - CO2 Emissions (in grams): 0.0075 ## Validation Metrics - Loss: 0.261 - Accuracy: 0.923 - Precision: 0.800 - Recall: 1.000 - AUC: 0.972 - F1: 0.889
distilgpt2
[ "pytorch", "tf", "jax", "tflite", "rust", "coreml", "safetensors", "gpt2", "text-generation", "en", "dataset:openwebtext", "arxiv:1910.01108", "arxiv:2201.08542", "arxiv:2203.12574", "arxiv:1910.09700", "arxiv:1503.02531", "transformers", "exbert", "license:apache-2.0", "model-index", "co2_eq_emissions", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,611,668
2023-05-01T07:17:55Z
--- license: creativeml-openrail-m base_model: runwayml/stable-diffusion-v1-5 instance_prompt: a photo of sks poorly-differentiated-adenocarcinoma tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers - dreambooth inference: true --- # DreamBooth - anic87/poor This is a dreambooth model derived from runwayml/stable-diffusion-v1-5. The weights were trained on a photo of sks poorly-differentiated-adenocarcinoma using [DreamBooth](https://dreambooth.github.io/). You can find some example images in the following. ![img_0](./image_0.png) ![img_1](./image_1.png) ![img_2](./image_2.png) ![img_3](./image_3.png) DreamBooth for the text encoder was enabled: False.
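A minimal inference sketch with `diffusers`, assuming the checkpoint loads as a standard Stable Diffusion pipeline (the repo id and instance prompt are taken from the card above; the loading details are an assumption):

```python
import torch
from diffusers import StableDiffusionPipeline

# Repo id and instance prompt as stated in the card; everything else is assumed.
pipe = StableDiffusionPipeline.from_pretrained("anic87/poor", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
image = pipe("a photo of sks poorly-differentiated-adenocarcinoma").images[0]
image.save("sample.png")
```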
AAli/distilgpt2-finetuned-wikitext2
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-05-01T09:45:22Z
--- license: mit tags: - generated_from_trainer metrics: - accuracy model-index: - name: classify_services_model results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # classify_services_model This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.5475 - Accuracy: 0.7549 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 1.0 | 71 | 0.6433 | 0.6640 | | No log | 2.0 | 142 | 0.5475 | 0.7549 | ### Framework versions - Transformers 4.27.4 - Pytorch 1.13.0 - Datasets 2.1.0 - Tokenizers 0.13.2
ASCCCCCCCC/distilbert-base-uncased-finetuned-clinc
[ "pytorch", "tensorboard", "distilbert", "text-classification", "transformers", "generated_from_trainer", "license:apache-2.0" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
35
2023-05-01T11:47:15Z
--- license: apache-2.0 tags: - text_classification - generated_from_trainer datasets: - emotion metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-emotion_detection results: - task: name: Text Classification type: text-classification dataset: name: emotion type: emotion config: split split: validation args: split metrics: - name: Accuracy type: accuracy value: 0.921 - name: F1 type: f1 value: 0.9210457518994596 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion_detection This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2211 - Accuracy: 0.921 - F1: 0.9210 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.7979 | 1.0 | 250 | 0.3147 | 0.906 | 0.9041 | | 0.2464 | 2.0 | 500 | 0.2211 | 0.921 | 0.9210 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
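For completeness, inference with a classifier like this is a one-liner via the `pipeline` API. The repo id below is a placeholder, since the card does not state where the fine-tuned weights are hosted:

```python
from transformers import pipeline

# "<user>" is a placeholder — substitute the actual namespace of the checkpoint.
classifier = pipeline(
    "text-classification",
    model="<user>/distilbert-base-uncased-finetuned-emotion_detection",
)
print(classifier("I can't wait to see the results!"))
```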
Aakansha/hs
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-05-01T12:13:21Z
--- license: apache-2.0 tags: - ctranslate2 --- # Fast-Inference with Ctranslate2 Speed up inference by 2x-8x using int8 inference in C++. This is a quantized version of [declare-lab/flan-alpaca-base](https://huggingface.co/declare-lab/flan-alpaca-base). ```bash pip install hf_hub_ctranslate2>=1.0.0 ctranslate2>=3.13.0 ``` Checkpoint compatible with [ctranslate2](https://github.com/OpenNMT/CTranslate2) and [hf-hub-ctranslate2](https://github.com/michaelfeil/hf-hub-ctranslate2) - `compute_type=int8_float16` for `device="cuda"` - `compute_type=int8` for `device="cpu"` ```python from hf_hub_ctranslate2 import TranslatorCT2fromHfHub, GeneratorCT2fromHfHub model_name = "michaelfeil/ct2fast-flan-alpaca-base" model = TranslatorCT2fromHfHub( # load in int8 on CUDA model_name_or_path=model_name, device="cuda", compute_type="int8_float16" ) outputs = model.generate( text=["How do you call a fast Flan-ingo?", "Translate to german: How are you doing?"], min_decoding_length=24, max_decoding_length=32, max_input_length=512, beam_size=5 ) print(outputs) ``` # Licence and other remarks: This is just a quantized version. Licence conditions are intended to be identical to the original huggingface repo.
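The matching CPU setup, per the compute-type note above, would look like this (a sketch built from the same constructor and `generate` arguments shown in the card):

```python
from hf_hub_ctranslate2 import TranslatorCT2fromHfHub

# int8 on CPU, as recommended above for device="cpu"
model_cpu = TranslatorCT2fromHfHub(
    model_name_or_path="michaelfeil/ct2fast-flan-alpaca-base",
    device="cpu",
    compute_type="int8",
)
outputs = model_cpu.generate(
    text=["How do you call a fast Flan-ingo?"],
    min_decoding_length=24,
    max_decoding_length=32,
    max_input_length=512,
    beam_size=5,
)
print(outputs)
```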
AdapterHub/bert-base-uncased-pf-rotten_tomatoes
[ "bert", "en", "dataset:rotten_tomatoes", "arxiv:2104.08247", "adapter-transformers", "text-classification", "adapterhub:sentiment/rotten_tomatoes" ]
text-classification
{ "architectures": null, "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
2023-05-01T14:18:38Z
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: bert-large-cased-sigir-support-refute-no-label-40-2nd-test-LR10-8-fast-19 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-large-cased-sigir-support-refute-no-label-40-2nd-test-LR10-8-fast-19 This model is a fine-tuned version of [jojoUla/bert-large-cased-sigir-support-refute-no-label-40](https://huggingface.co/jojoUla/bert-large-cased-sigir-support-refute-no-label-40) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.1514 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 4e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 5.1321 | 1.0 | 1 | 1.7547 | | 2.4462 | 2.0 | 2 | 8.9579 | | 2.1549 | 3.0 | 3 | 4.2014 | | 1.7913 | 4.0 | 4 | 0.0533 | | 1.6807 | 5.0 | 5 | 0.7624 | | 1.1819 | 6.0 | 6 | 0.4617 | | 2.5666 | 7.0 | 7 | 3.3459 | | 1.8443 | 8.0 | 8 | 1.1587 | ### Framework versions - Transformers 4.29.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
AdapterHub/roberta-base-pf-ud_pos
[ "roberta", "en", "dataset:universal_dependencies", "arxiv:2104.08247", "adapter-transformers", "token-classification", "adapterhub:pos/ud_ewt" ]
token-classification
{ "architectures": null, "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- license: mit tags: - generated_from_trainer model-index: - name: chilenoGPT results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # chilenoGPT This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 3.3921 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 30414 - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 4.4985 | 1.0 | 3802 | 4.3106 | | 4.1063 | 2.0 | 7604 | 3.9798 | | 3.8797 | 3.0 | 11406 | 3.7886 | | 3.7554 | 4.0 | 15208 | 3.6645 | | 3.616 | 5.0 | 19010 | 3.5792 | | 3.534 | 6.0 | 22812 | 3.5152 | | 3.4631 | 7.0 | 26614 | 3.4632 | | 3.3867 | 8.0 | 30416 | 3.4330 | | 3.2781 | 9.0 | 34218 | 3.3975 | | 3.2074 | 10.0 | 38020 | 3.3921 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Tokenizers 0.13.3
AdapterHub/roberta-base-pf-wic
[ "roberta", "en", "arxiv:2104.08247", "adapter-transformers", "text-classification", "adapterhub:wordsence/wic" ]
text-classification
{ "architectures": null, "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: creativeml-openrail-m tags: - stablediffusionapi.com - stable-diffusion-api - text-to-image - ultra-realistic pinned: true --- # LastOfUs_Buildings API Inference ![generated from stablediffusionapi.com](https://pub-8b49af329fae499aa563997f5d4068a4.r2.dev/generations/7610306581682956160.png) ## Get API Key Get an API key from [Stable Diffusion API](http://stablediffusionapi.com/); no payment needed. Replace the key in the code below, and change **model_id** to "lastofusbuildings". Coding in PHP/Node/Java etc.? Have a look at the docs for more code examples: [View docs](https://stablediffusionapi.com/docs) Model link: [View model](https://stablediffusionapi.com/models/lastofusbuildings) Credits: [View credits](https://civitai.com/?query=LastOfUs_Buildings) View all models: [View Models](https://stablediffusionapi.com/models) ```python import requests import json url = "https://stablediffusionapi.com/api/v3/dreambooth" payload = json.dumps({ "key": "", "model_id": "lastofusbuildings", "prompt": "actual 8K portrait photo of gareth person, portrait, happy colors, bright eyes, clear eyes, warm smile, smooth soft skin, big dreamy eyes, beautiful intricate colored hair, symmetrical, anime wide eyes, soft lighting, detailed face, by makoto shinkai, stanley artgerm lau, wlop, rossdraws, concept art, digital painting, looking into camera", "negative_prompt": "painting, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, cloned face, skinny, glitchy, double torso, extra arms, extra hands, mangled fingers, missing lips, ugly face, distorted face, extra legs, anime", "width": "512", "height": "512", "samples": "1", "num_inference_steps": "30", "safety_checker": "no", "enhance_prompt": "yes", "seed": None, "guidance_scale": 7.5, "multi_lingual": "no", "panorama": "no", "self_attention": "no", "upscale": "no", "embeddings": "embeddings_model_id", "lora": "lora_model_id", "webhook": None, "track_id": None }) headers = { 'Content-Type': 'application/json' } response = requests.request("POST", url, headers=headers, data=payload) print(response.text) ``` > Use this coupon code to get 25% off **DMGG0RBN**
Adrianaforididk/Jinx
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: wav2vec2-large-xls-r-300m-sinhala results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-large-xls-r-300m-sinhala This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the None dataset. It achieves the following results on the evaluation set: - eval_loss: 5.4599 - eval_wer: 1.0 - eval_runtime: 185.9221 - eval_samples_per_second: 6.729 - eval_steps_per_second: 0.844 - epoch: 2.53 - step: 100 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 30 ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 1.18.3 - Tokenizers 0.13.3
AimB/mT5-en-kr-aihub-netflix
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="wzhao82/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ```
AimB/mT5-en-kr-natural
[ "pytorch", "mt5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MT5ForConditionalGeneration" ], "model_type": "mt5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
78
null
--- tags: - generated_from_trainer metrics: - accuracy model-index: - name: BERThard results: [] license: mit datasets: - hard language: - ar pipeline_tag: text-classification --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # BERThard This model is a fine-tuned version of [aubmindlab/bert-base-arabertv2](https://huggingface.co/aubmindlab/bert-base-arabertv2) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.4141 - Accuracy: 0.8311 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 0.4488 | 1.0 | 5946 | 0.4104 | 0.8232 | | 0.3866 | 2.0 | 11892 | 0.4047 | 0.8288 | | 0.3462 | 3.0 | 17838 | 0.4141 | 0.8311 | ### Framework versions - Transformers 4.28.1 - Pytorch 1.12.1+cu116 - Datasets 2.4.0 - Tokenizers 0.12.1
AimB/mT5-en-kr-opus
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-Taxi-v3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.56 +/- 2.71 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3** . ## Usage ```python model = load_from_hub(repo_id="wzhao82/q-Taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ```
Aimendo/Triage
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - Pixelcopter-PLE-v0 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-pg1-pixelcopter results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Pixelcopter-PLE-v0 type: Pixelcopter-PLE-v0 metrics: - type: mean_reward value: 66.80 +/- 54.12 name: mean_reward verified: false --- # **Reinforce** Agent playing **Pixelcopter-PLE-v0** This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
Akashpb13/Kabyle_xlsr
[ "pytorch", "safetensors", "wav2vec2", "automatic-speech-recognition", "kab", "dataset:mozilla-foundation/common_voice_8_0", "transformers", "mozilla-foundation/common_voice_8_0", "generated_from_trainer", "sw", "robust-speech-event", "model_for_talk", "hf-asr-leaderboard", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: creativeml-openrail-m base_model: anic87/pcam-tumor-text instance_prompt: a photo of sks noncancerous-pathology-tissue tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers - dreambooth inference: true --- # DreamBooth - anic87/pcam-tumor-normal-text This is a dreambooth model derived from anic87/pcam-tumor-text. The weights were trained on a photo of sks noncancerous-pathology-tissue using [DreamBooth](https://dreambooth.github.io/). You can find some example images in the following. ![img_0](./image_0.png) ![img_1](./image_1.png) ![img_2](./image_2.png) ![img_3](./image_3.png) DreamBooth for the text encoder was enabled: True.
Akashpb13/xlsr_kurmanji_kurdish
[ "pytorch", "safetensors", "wav2vec2", "automatic-speech-recognition", "kmr", "ku", "dataset:mozilla-foundation/common_voice_8_0", "transformers", "mozilla-foundation/common_voice_8_0", "generated_from_trainer", "robust-speech-event", "model_for_talk", "hf-asr-leaderboard", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- license: creativeml-openrail-m tags: - text-to-image widget: - text: ultmedm --- ### edm-ultmedm-v2 Dreambooth model trained by wimvanhenden with [Hugging Face Dreambooth Training Space](https://huggingface.co/spaces/multimodalart/dreambooth-training) with the v1-5 base model. You can run your new concept via the `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb). Don't forget to use the concept prompts! Sample pictures of: ultmedm (use that in your prompt) ![ultmedm 0](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%281%29.jpg)![ultmedm 1](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%282%29.jpg)![ultmedm 2](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%283%29.jpg)![ultmedm 3](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%284%29.jpg)![ultmedm 4](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%285%29.jpg)![ultmedm 5](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%286%29.jpg)![ultmedm 6](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%287%29.jpg)![ultmedm 7](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%288%29.jpg)![ultmedm 8](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%289%29.jpg)![ultmedm 9](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2810%29.jpg)![ultmedm 10](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2811%29.jpg)![ultmedm 11](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2812%29.jpg)![ultmedm 12](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2813%29.jpg)![ultmedm 13](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2814%29.jpg)![ultmedm 14](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2815%29.jpg)![ultmedm 15](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2816%29.jpg)![ultmedm 16](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2817%29.jpg)![ultmedm 17](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2818%29.jpg)![ultmedm 18](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2819%29.jpg)![ultmedm 19](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2820%29.jpg)![ultmedm 20](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2821%29.jpg)![ultmedm 21](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2822%29.jpg)![ultmedm 22](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2823%29.jpg)![ultmedm 23](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2824%29.jpg)![ultmedm 24](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2825%29.jpg)![ultmedm 25](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2826%29.jpg)![ultmedm 26](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2827%29.jpg)![ultmedm 27](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2828%29.jpg)![ultmedm 28](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2829%29.jpg)![ultmedm 29](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2830%29.jpg)![ultmedm 30](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2831%29.jpg)![ultmedm 31](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2832%29.jpg)![ultmedm 32](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2833%29.jpg)![ultmedm 33](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2834%29.jpg)![ultmedm 34](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2835%29.jpg)![ultmedm 35](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2836%29.jpg)![ultmedm 36](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2837%29.jpg)![ultmedm 37](https://huggingface.co/wimvanhenden/edm-ultmedm-v2/resolve/main/concept_images/ultmedm_%2838%29.jpg)
Akashpb13/xlsr_maltese_wav2vec2
[ "pytorch", "jax", "wav2vec2", "automatic-speech-recognition", "mt", "dataset:common_voice", "transformers", "audio", "speech", "xlsr-fine-tuning-week", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: ly111/t5small-finetuned-xsum results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # ly111/t5small-finetuned-xsum This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 3.1350 - Validation Loss: 2.7416 - Train Rouge1: 20.9336 - Train Rouge2: 3.9725 - Train Rougel: 16.4166 - Train Rougelsum: 16.3889 - Train Gen Len: 18.7077 - Epoch: 0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': 2e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Train Rouge1 | Train Rouge2 | Train Rougel | Train Rougelsum | Train Gen Len | Epoch | |:----------:|:---------------:|:------------:|:------------:|:------------:|:---------------:|:-------------:|:-----:| | 3.1350 | 2.7416 | 20.9336 | 3.9725 | 16.4166 | 16.3889 | 18.7077 | 0 | ### Framework versions - Transformers 4.28.1 - TensorFlow 2.12.0 - Datasets 2.12.0 - Tokenizers 0.13.3
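The optimizer dict in the card above maps directly onto the TensorFlow helper shipped with `transformers`; a minimal reconstruction sketch (all values are copied from the card, nothing else is assumed):

```python
# Hedged sketch: rebuild the AdamWeightDecay optimizer listed in the card.
# AdamWeightDecay is the TensorFlow optimizer class from transformers.
from transformers import AdamWeightDecay

optimizer = AdamWeightDecay(
    learning_rate=2e-05,
    beta_1=0.9,
    beta_2=0.999,
    epsilon=1e-07,
    amsgrad=False,
    weight_decay_rate=0.01,
)
```

Passing this to `model.compile(optimizer=optimizer)` on the TF model reproduces the training setup the card describes.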
AkshatSurolia/ICD-10-Code-Prediction
[ "pytorch", "bert", "transformers", "text-classification", "license:apache-2.0", "has_space" ]
text-classification
{ "architectures": null, "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
994
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: DSChallengeLearningRate results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # DSChallengeLearningRate This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.3253 - Accuracy: 0.9202 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1889 | 1.0 | 746 | 0.3511 | 0.9122 | | 0.1358 | 2.0 | 1492 | 0.3253 | 0.9202 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
AkshaySg/langid
[ "multilingual", "dataset:VoxLingua107", "speechbrain", "audio-classification", "embeddings", "Language", "Identification", "pytorch", "ECAPA-TDNN", "TDNN", "VoxLingua107", "license:apache-2.0" ]
audio-classification
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- license: apache-2.0 tags: - summarization - generated_from_trainer datasets: - wiki_lingua metrics: - rouge model-index: - name: wiki_lingua-fr-8-3-5.6e-05-mt5-small-finetuned results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: wiki_lingua type: wiki_lingua config: fr split: test args: fr metrics: - name: Rouge1 type: rouge value: 19.9596 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wiki_lingua-fr-8-3-5.6e-05-mt5-small-finetuned This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on the wiki_lingua dataset. It achieves the following results on the evaluation set: - Loss: 1.9117 - Rouge1: 19.9596 - Rouge2: 7.5052 - Rougel: 17.4363 - Rougelsum: 19.5192 # Baseline LEAD-64 - Rouge1: 22.4 - Rouge2: 5.92 - Rougel: 14.44 - Rougelsum: 14.44 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5.6e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:------:|:-------:|:---------:| | 2.8962 | 1.0 | 5428 | 2.0026 | 18.8621 | 6.6127 | 16.0264 | 18.4354 | | 2.313 | 2.0 | 10856 | 1.9260 | 19.7274 | 7.2791 | 17.0466 | 19.2904 | | 2.2248 | 3.0 | 16284 | 1.9117 | 19.9596 | 7.5052 | 17.4363 | 19.5192 | ### Framework versions - Transformers 4.27.4 - Pytorch 1.13.0 - Datasets 2.1.0 - Tokenizers 0.13.2
Akuva2001/SocialGraph
[ "has_space" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: DSChallengeWeightDecay results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # DSChallengeWeightDecay This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.4742 - Accuracy: 0.9199 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.049 | 1.0 | 746 | 0.4537 | 0.9152 | | 0.0204 | 2.0 | 1492 | 0.4742 | 0.9199 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
AlErysvi/Erys
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('SaudxInu/sd-class-butterflies-32') image = pipeline().images[0] image ```
AlanDev/test
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
Access to model mrsdoublecups2023/MRSDOUBLECUPS is restricted and you are not in the authorized list. Visit https://huggingface.co/mrsdoublecups2023/MRSDOUBLECUPS to ask for access.
AlbertHSU/BertTEST
[ "pytorch" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
2023-05-01T18:48:33Z
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1080023535320424450/kR1TtMHc_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Matthew Keys</div> <div style="text-align: center; font-size: 14px;">@matthewkeyslive</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Matthew Keys. | Data | Matthew Keys | | --- | --- | | Tweets downloaded | 3235 | | Retweets | 865 | | Short tweets | 188 | | Tweets kept | 2182 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/5fkgkze3/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @matthewkeyslive's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/wvvli2np) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/wvvli2np/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/matthewkeyslive') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Aleksandar/electra-srb-ner
[ "pytorch", "safetensors", "electra", "token-classification", "dataset:wikiann", "transformers", "generated_from_trainer", "autotrain_compatible" ]
token-classification
{ "architectures": [ "ElectraForTokenClassification" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
15
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-Taxi-v3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.48 +/- 2.78 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python model = load_from_hub(repo_id="alix03/q-Taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
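A short greedy-rollout sketch continuing the card's snippet (not part of the original card; it assumes the pickled dict stores the table under a `qtable` key, the convention in the Hugging Face Deep RL course notebooks, and the classic gym step API):

```python
# Continues the snippet above: `model` and `env` come from load_from_hub and
# gym.make. The "qtable" key is an assumption (Deep RL course convention).
import numpy as np

state = env.reset()
done = False
episode_reward = 0.0
while not done:
    action = int(np.argmax(model["qtable"][state]))  # greedy action from the Q-table
    state, reward, done, _ = env.step(action)
    episode_reward += reward
print(f"episode reward: {episode_reward}")
```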
Aleksandar/electra-srb-oscar
[ "pytorch", "electra", "fill-mask", "transformers", "generated_from_trainer", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "ElectraForMaskedLM" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: finetuning-sentiment-model-3000-samples results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning-sentiment-model-3000-samples This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.3061 - Accuracy: 0.8567 - F1: 0.8562 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Tokenizers 0.13.3
AlexDemon/Alex
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 datasets: - vicgalle/alpaca-gpt4 - ehartford/WizardLM_alpaca_evol_instruct_70k_unfiltered language: - en library_name: transformers pipeline_tag: text-generation --- # openaccess-ai-collective/llama-13b-alpaca-wizard ## Trained - `vicgalle/alpaca-gpt4` 1 epoch, learning rate 3e-5 https://wandb.ai/wing-lian/wizard-vicuna-gpt4/overview - `deepspeed scripts/finetune.py configs/axolotl/wizard-vicuna-13b-step1.yml --deepspeed configs/ds_config.json --num_epochs 2 --warmup_steps 46 --logging_steps 1 --save_steps 23` - `wizardlm` https://wandb.ai/wing-lian/wizard-vicuna-gpt4/runs/4y38knw4 - `deepspeed scripts/finetune.py configs/axolotl/wizard-vicuna-13b-step2.yml --deepspeed configs/ds_config-step2.json --num_epochs 2 --logging_steps 1` - `vicuna` TBD <pre>Brought to you by the OpenAccess AI Collective</pre>
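A minimal inference sketch (not part of the original card); it assumes the repo hosts standard HF-format LLaMA weights and that an Alpaca-style prompt template applies, since the model was tuned on `vicgalle/alpaca-gpt4`:

```python
# Hedged usage sketch; the repo id is taken from the card's title, and the
# Alpaca prompt format is an assumption based on the training data.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "openaccess-ai-collective/llama-13b-alpaca-wizard"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(
    repo, torch_dtype=torch.float16, device_map="auto"
)

prompt = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
    "### Instruction:\nExplain what a LoRA adapter is.\n\n### Response:\n"
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=200)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```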
AlexN/xls-r-300m-fr-0
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "fr", "dataset:mozilla-foundation/common_voice_8_0", "transformers", "mozilla-foundation/common_voice_8_0", "generated_from_trainer", "robust-speech-event", "hf-asr-leaderboard", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - emotion metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: name: Text Classification type: text-classification dataset: name: emotion type: emotion config: split split: validation args: split metrics: - name: Accuracy type: accuracy value: 0.925 - name: F1 type: f1 value: 0.9251879205114556 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2273 - Accuracy: 0.925 - F1: 0.9252 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.8401 | 1.0 | 250 | 0.3279 | 0.9025 | 0.8981 | | 0.2575 | 2.0 | 500 | 0.2273 | 0.925 | 0.9252 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
Alexander-Learn/bert-finetuned-squad-accelerate
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_trainer metrics: - accuracy model-index: - name: BERTmsda results: [] license: mit language: - ar pipeline_tag: text-classification --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # BERTmsda This model is a fine-tuned version of [aubmindlab/bert-base-arabertv2](https://huggingface.co/aubmindlab/bert-base-arabertv2) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.4153 - Accuracy: 0.8629 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.3887 | 1.0 | 2937 | 0.3706 | 0.8546 | | 0.301 | 2.0 | 5874 | 0.3746 | 0.8632 | | 0.2284 | 3.0 | 8811 | 0.4153 | 0.8629 | ### Framework versions - Transformers 4.28.1 - Pytorch 1.12.1+cu116 - Datasets 2.4.0 - Tokenizers 0.12.1
Alexandru/creative_copilot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: distilbert-id-law results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-id-law This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.8912 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 2.2874 | 1.0 | 6262 | 2.1654 | | 2.0961 | 2.0 | 12524 | 2.0036 | | 2.0255 | 3.0 | 18786 | 1.9364 | | 1.9767 | 4.0 | 25048 | 1.9011 | | 1.9579 | 5.0 | 31310 | 1.8912 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
Aliraza47/BERT
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 datasets: - vicgalle/alpaca-gpt4 language: - en library_name: transformers pipeline_tag: conversational --- # Llama Adapter 13B - Based on https://github.com/ZrrSkywalker/LLaMA-Adapter using HF transformers - See HF PR https://github.com/huggingface/peft/pull/268 - W&B data: https://wandb.ai/wing-lian/llama-adapter-13b
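A hypothetical loading sketch: the base-model id is an assumption and the adapter path is a placeholder (the card names no repo id); the adaption-prompt support comes from the PEFT PR linked above.

```python
# Hypothetical sketch only. "huggyllama/llama-13b" is an assumed base repo and
# "path/to/llama-adapter-13b" is a placeholder for this adapter's weights.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("huggyllama/llama-13b")
model = PeftModel.from_pretrained(base, "path/to/llama-adapter-13b")
```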
Alvenir/wav2vec2-base-da
[ "pytorch", "wav2vec2", "pretraining", "da", "transformers", "speech", "license:apache-2.0" ]
null
{ "architectures": [ "Wav2Vec2ForPreTraining" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
62
2023-05-01T21:52:07Z
--- license: openrail datasets: - OpenAssistant/oasst1 language: - en - es metrics: - accuracy library_name: adapter-transformers pipeline_tag: feature-extraction tags: - art ---
aisoftware/Loquela
[ "onnx" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - conversational --- # Bisho DialoGPT Model
Amrrs/south-indian-foods
[ "pytorch", "tensorboard", "vit", "image-classification", "transformers", "huggingpics", "model-index", "autotrain_compatible" ]
image-classification
{ "architectures": [ "ViTForImageClassification" ], "model_type": "vit", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
21
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - rotten_tomatoes metrics: - accuracy model-index: - name: test_trainer results: - task: name: Text Classification type: text-classification dataset: name: rotten_tomatoes type: rotten_tomatoes config: default split: test args: default metrics: - name: Accuracy type: accuracy value: 0.501 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # test_trainer This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the rotten_tomatoes dataset. It achieves the following results on the evaluation set: - Loss: 0.7153 - Accuracy: 0.501 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.6412 | 0.01 | 1 | 0.7288 | 0.501 | | 0.6171 | 0.02 | 2 | 0.7083 | 0.501 | | 0.5805 | 0.02 | 3 | 0.7153 | 0.501 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0 - Datasets 2.12.0 - Tokenizers 0.13.3
Anders/itu-ams-summa
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - emotion metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: name: Text Classification type: text-classification dataset: name: emotion type: emotion config: split split: validation args: split metrics: - name: Accuracy type: accuracy value: 0.922 - name: F1 type: f1 value: 0.9221186592426542 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2225 - Accuracy: 0.922 - F1: 0.9221 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | No log | 1.0 | 250 | 0.3273 | 0.9025 | 0.8984 | | No log | 2.0 | 500 | 0.2225 | 0.922 | 0.9221 | ### Framework versions - Transformers 4.28.1 - Pytorch 1.11.0 - Datasets 2.11.0 - Tokenizers 0.13.3
Andranik/TestQA2
[ "pytorch", "electra", "question-answering", "transformers", "generated_from_trainer", "autotrain_compatible" ]
question-answering
{ "architectures": [ "ElectraForQuestionAnswering" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- license: creativeml-openrail-m base_model: stabilityai/stable-diffusion-2-1-base instance_prompt: a photo of sks tumor-tissue-histology tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers - dreambooth inference: true --- # DreamBooth - anic87/crc-tumor-text This is a dreambooth model derived from stabilityai/stable-diffusion-2-1-base. The weights were trained on a photo of sks tumor-tissue-histology using [DreamBooth](https://dreambooth.github.io/). You can find some example images in the following. ![img_0](./image_0.png) ![img_1](./image_1.png) ![img_2](./image_2.png) ![img_3](./image_3.png) DreamBooth for the text encoder was enabled: True.
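A minimal `diffusers` inference sketch (not part of the original card); the repo id and the instance prompt are taken from the card itself, and it assumes the repo stores full pipeline weights:

```python
# Hedged sketch; fp16 and CUDA are assumptions about the runtime, not the card.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "anic87/crc-tumor-text", torch_dtype=torch.float16
).to("cuda")
image = pipe("a photo of sks tumor-tissue-histology").images[0]
image.save("sample.png")
```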
AndreLiu1225/t5-news-summarizer
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 274.59 +/- 18.01 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) ```python from huggingface_sb3 import load_from_hub from stable_baselines3 import PPO from stable_baselines3.common.env_util import make_vec_env from stable_baselines3.common.evaluation import evaluate_policy checkpoint = load_from_hub("jkkawach/ppo-LunarLander-v2", "ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) env = make_vec_env("LunarLander-v2", n_envs=1) print("Evaluating model") mean_reward, std_reward = evaluate_policy( model, env, n_eval_episodes=20, deterministic=True, ) print(f"Mean reward = {mean_reward:.2f} +/- {std_reward:.2f}") obs = env.reset() try: while True: action, _states = model.predict(obs, deterministic=True) obs, rewards, dones, info = env.step(action) env.render() except KeyboardInterrupt: pass ```
AndreLiu1225/t5-news
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
18
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: name: Text Classification type: text-classification dataset: name: imdb type: imdb config: plain_text split: test args: plain_text metrics: - name: Accuracy type: accuracy value: 0.93208 - name: F1 type: f1 value: 0.9324367340442463 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.2312 - Accuracy: 0.9321 - F1: 0.9324 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.2634 | 1.0 | 1563 | 0.1887 | 0.9275 | 0.9268 | | 0.1467 | 2.0 | 3126 | 0.2312 | 0.9321 | 0.9324 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0 - Datasets 2.12.0 - Tokenizers 0.13.3
AndrewChar/model-QA-5-epoch-RU
[ "tf", "distilbert", "question-answering", "ru", "dataset:sberquad", "transformers", "generated_from_keras_callback", "autotrain_compatible" ]
question-answering
{ "architectures": [ "DistilBertForQuestionAnswering" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
109
null
--- license: creativeml-openrail-m base_model: /projects/ac67/projects/diffusers/examples/dreambooth/crc-tumor-text/ instance_prompt: a photo of sks tissue-histology-without-tumor tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers - dreambooth inference: true --- # DreamBooth - anic87/crc-tumor-normal-text This is a dreambooth model derived from /projects/ac67/projects/diffusers/examples/dreambooth/crc-tumor-text/. The weights were trained on a photo of sks tissue-histology-without-tumor using [DreamBooth](https://dreambooth.github.io/). You can find some example images in the following. ![img_0](./image_0.png) ![img_1](./image_1.png) ![img_2](./image_2.png) ![img_3](./image_3.png) DreamBooth for the text encoder was enabled: True.
AndrewMcDowell/wav2vec2-xls-r-1B-german
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "de", "dataset:mozilla-foundation/common_voice_8_0", "transformers", "mozilla-foundation/common_voice_8_0", "generated_from_trainer", "robust-speech-event", "hf-asr-leaderboard", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
Ivo22 is a language model designed for processing and generating text in Serbian and English. It was trained exclusively on literary works and on technical literature for writing code in the Arduino C programming language.
AndrewMcDowell/wav2vec2-xls-r-300m-arabic
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "ar", "dataset:mozilla-foundation/common_voice_7_0", "transformers", "generated_from_trainer", "hf-asr-leaderboard", "mozilla-foundation/common_voice_7_0", "robust-speech-event", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: mit --- ### ahx-beta-45043dc on Stable Diffusion This is the `<ahx-beta-45043dc>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as a `style`: ![<ahx-beta-45043dc> 0](https://huggingface.co/sd-concepts-library/ahx-beta-45043dc/resolve/main/concept_images/0.jpeg) ![<ahx-beta-45043dc> 1](https://huggingface.co/sd-concepts-library/ahx-beta-45043dc/resolve/main/concept_images/2.jpeg) ![<ahx-beta-45043dc> 2](https://huggingface.co/sd-concepts-library/ahx-beta-45043dc/resolve/main/concept_images/3.jpeg) ![<ahx-beta-45043dc> 3](https://huggingface.co/sd-concepts-library/ahx-beta-45043dc/resolve/main/concept_images/7.jpeg) ![<ahx-beta-45043dc> 4](https://huggingface.co/sd-concepts-library/ahx-beta-45043dc/resolve/main/concept_images/6.jpeg) ![<ahx-beta-45043dc> 5](https://huggingface.co/sd-concepts-library/ahx-beta-45043dc/resolve/main/concept_images/4.jpeg) ![<ahx-beta-45043dc> 6](https://huggingface.co/sd-concepts-library/ahx-beta-45043dc/resolve/main/concept_images/5.jpeg) ![<ahx-beta-45043dc> 7](https://huggingface.co/sd-concepts-library/ahx-beta-45043dc/resolve/main/concept_images/8.jpeg) ![<ahx-beta-45043dc> 8](https://huggingface.co/sd-concepts-library/ahx-beta-45043dc/resolve/main/concept_images/1.jpeg)
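Besides the linked notebooks, newer `diffusers` releases can load the learned embedding directly; a hedged sketch (the SD 1.5 base-model id is an assumption, not stated in the card):

```python
# Hedged sketch: load_textual_inversion pulls the learned embedding from the
# concept repo; the base-model id below is an assumption.
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe.load_textual_inversion("sd-concepts-library/ahx-beta-45043dc")
image = pipe("a landscape in the style of <ahx-beta-45043dc>").images[0]
image.save("ahx_sample.png")
```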
Andrey1989/mbert-finetuned-ner_2
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
Access to model Hakulani/t5-end2end-questions-generation is restricted and you are not in the authorized list. Visit https://huggingface.co/Hakulani/t5-end2end-questions-generation to ask for access.
Andrey1989/mt5-small-finetuned-mlsum-fr
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: unknown duplicated_from: Mintchocowasabi230331/PrivateMYMODEL --- Personal model collection. Contains the models / LoRAs / VAEs collected on my hard drive up to April 21, 2023.
Andrija/RobertaFastBPE
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: ratish/DBERT_Fault_v1 results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # ratish/DBERT_Fault_v1 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 1.0328 - Validation Loss: 1.1356 - Train Accuracy: 0.5897 - Epoch: 4 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 1520, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Train Accuracy | Epoch | |:----------:|:---------------:|:--------------:|:-----:| | 1.6393 | 1.5483 | 0.3590 | 0 | | 1.5144 | 1.5141 | 0.3590 | 1 | | 1.4025 | 1.4162 | 0.4103 | 2 | | 1.2335 | 1.3052 | 0.5128 | 3 | | 1.0328 | 1.1356 | 0.5897 | 4 | ### Framework versions - Transformers 4.28.1 - TensorFlow 2.12.0 - Datasets 2.12.0 - Tokenizers 0.13.3
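The serialized optimizer in the card above maps onto plain `tf.keras`; a hedged reconstruction with every number copied from the config dict:

```python
# Hedged sketch: Adam with the PolynomialDecay schedule listed in the card.
import tensorflow as tf

schedule = tf.keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=2e-05,
    decay_steps=1520,
    end_learning_rate=0.0,
    power=1.0,
    cycle=False,
)
optimizer = tf.keras.optimizers.Adam(
    learning_rate=schedule, beta_1=0.9, beta_2=0.999, epsilon=1e-08, amsgrad=False
)
```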
Andrija/SRoBERTa-L-NER
[ "pytorch", "roberta", "token-classification", "hr", "sr", "multilingual", "dataset:hr500k", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "RobertaForTokenClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
Quantized from https://huggingface.co/Neko-Institute-of-Science/pygmalion-7b
Andrija/SRoBERTa-base
[ "pytorch", "roberta", "fill-mask", "hr", "sr", "multilingual", "dataset:oscar", "dataset:leipzig", "transformers", "masked-lm", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
80
null
--- license: mit --- ### ahx-beta-4504eb3 on Stable Diffusion This is the `<ahx-beta-4504eb3>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as a `style`: ![<ahx-beta-4504eb3> 0](https://huggingface.co/sd-concepts-library/ahx-beta-4504eb3/resolve/main/concept_images/0.jpeg) ![<ahx-beta-4504eb3> 1](https://huggingface.co/sd-concepts-library/ahx-beta-4504eb3/resolve/main/concept_images/2.jpeg) ![<ahx-beta-4504eb3> 2](https://huggingface.co/sd-concepts-library/ahx-beta-4504eb3/resolve/main/concept_images/3.jpeg) ![<ahx-beta-4504eb3> 3](https://huggingface.co/sd-concepts-library/ahx-beta-4504eb3/resolve/main/concept_images/7.jpeg) ![<ahx-beta-4504eb3> 4](https://huggingface.co/sd-concepts-library/ahx-beta-4504eb3/resolve/main/concept_images/6.jpeg) ![<ahx-beta-4504eb3> 5](https://huggingface.co/sd-concepts-library/ahx-beta-4504eb3/resolve/main/concept_images/4.jpeg) ![<ahx-beta-4504eb3> 6](https://huggingface.co/sd-concepts-library/ahx-beta-4504eb3/resolve/main/concept_images/9.jpeg) ![<ahx-beta-4504eb3> 7](https://huggingface.co/sd-concepts-library/ahx-beta-4504eb3/resolve/main/concept_images/5.jpeg) ![<ahx-beta-4504eb3> 8](https://huggingface.co/sd-concepts-library/ahx-beta-4504eb3/resolve/main/concept_images/8.jpeg) ![<ahx-beta-4504eb3> 9](https://huggingface.co/sd-concepts-library/ahx-beta-4504eb3/resolve/main/concept_images/1.jpeg)
Andrija/SRoBERTaFastBPE-2
[ "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: mit tags: - generated_from_keras_callback model-index: - name: soumya13/GPT2_CleanDesc_MAKE_v1.5 results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # soumya13/GPT2_CleanDesc_MAKE_v1.5 This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.0052 - Validation Loss: 0.0002 - Train Accuracy: 1.0 - Epoch: 24 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': False, 'is_legacy_optimizer': False, 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 7600, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Train Accuracy | Epoch | |:----------:|:---------------:|:--------------:|:-----:| | 4.0208 | 1.7346 | 0.3590 | 0 | | 1.5028 | 0.9649 | 0.6923 | 1 | | 0.8469 | 0.4756 | 0.9231 | 2 | | 0.4664 | 0.1764 | 0.9231 | 3 | | 0.2632 | 0.0836 | 0.9744 | 4 | | 0.1579 | 0.0488 | 0.9744 | 5 | | 0.1298 | 0.0250 | 1.0 | 6 | | 0.0962 | 0.0136 | 1.0 | 7 | | 0.0498 | 0.0041 | 1.0 | 8 | | 0.0520 | 0.0022 | 1.0 | 9 | | 0.0418 | 0.0016 | 1.0 | 10 | | 0.0403 | 0.0013 | 1.0 | 11 | | 0.0281 | 0.0009 | 1.0 | 12 | | 0.0236 | 0.0008 | 1.0 | 13 | | 0.0150 | 0.0008 | 1.0 | 14 | | 0.0173 | 0.0007 | 1.0 | 15 | | 0.0160 | 0.0005 | 1.0 | 16 | | 0.0302 | 0.0004 | 1.0 | 17 | | 0.0250 | 0.0003 | 1.0 | 18 | | 0.0069 | 0.0003 | 1.0 | 19 | | 0.0241 | 0.0003 | 1.0 | 20 | | 0.0100 | 0.0003 | 1.0 | 21 | | 0.0114 | 0.0002 | 1.0 | 22 | | 0.0172 | 0.0002 | 1.0 | 23 | | 0.0052 | 0.0002 | 1.0 | 24 | ### Framework versions - Transformers 4.28.1 - TensorFlow 2.12.0 - Datasets 2.12.0 - Tokenizers 0.13.3
Andry/111
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-05-02T00:30:43Z
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: ratish/DBERT_Fault_v1.1 results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # ratish/DBERT_Fault_v1.1 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.2870 - Validation Loss: 0.7250 - Train Accuracy: 0.6667 - Epoch: 3 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 1520, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Train Accuracy | Epoch | |:----------:|:---------------:|:--------------:|:-----:| | 0.6423 | 0.7830 | 0.5128 | 0 | | 0.5827 | 0.6110 | 0.6667 | 1 | | 0.4089 | 0.7688 | 0.6410 | 2 | | 0.2870 | 0.7250 | 0.6667 | 3 | ### Framework versions - Transformers 4.28.1 - TensorFlow 2.12.0 - Datasets 2.12.0 - Tokenizers 0.13.3
Andy1621/uniformer
[ "license:mit", "has_space" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: distilgpt2-finetuned-wikitext2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilgpt2-finetuned-wikitext2 This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on the None dataset. It achieves the following results on the evaluation set: - Loss: 3.6421 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 3.7602 | 1.0 | 2334 | 3.6669 | | 3.653 | 2.0 | 4668 | 3.6472 | | 3.6006 | 3.0 | 7002 | 3.6421 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
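The card above reports only cross-entropy loss; for a causal language model this can be restated as perplexity, which is simply exp(loss). A quick check (this figure is derived here, not taken from the original card):

```python
import math

eval_loss = 3.6421  # final validation loss from the table above
print(f"perplexity = {math.exp(eval_loss):.1f}")  # ≈ 38.2
```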
AndyJ/clinicalBERT
[ "pytorch", "transformers" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: ratish/DBERT_Fault_v1.2 results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # ratish/DBERT_Fault_v1.2 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.0745 - Validation Loss: 0.5163 - Train Accuracy: 0.7949 - Epoch: 8 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 3040, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Train Accuracy | Epoch | |:----------:|:---------------:|:--------------:|:-----:| | 0.6576 | 0.6994 | 0.5128 | 0 | | 0.5903 | 0.5923 | 0.7179 | 1 | | 0.4339 | 0.4675 | 0.7949 | 2 | | 0.3132 | 0.5160 | 0.7179 | 3 | | 0.2338 | 0.5044 | 0.7692 | 4 | | 0.2483 | 0.7382 | 0.6923 | 5 | | 0.1555 | 0.4269 | 0.8462 | 6 | | 0.0881 | 0.4634 | 0.7949 | 7 | | 0.0745 | 0.5163 | 0.7949 | 8 | ### Framework versions - Transformers 4.28.1 - TensorFlow 2.12.0 - Datasets 2.12.0 - Tokenizers 0.13.3
Ani123/Ani
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_keras_callback model-index: - name: RobCaamano/toxicity_weighted results: [] --- # RobCaamano/toxicity_weighted This model is a fine-tuned version of DistilBERT base (uncased). It achieves the following results on the evaluation set: - Train Loss: 0.0240 - Train Precision: 0.9522 - Train Recall: 0.9190 - Epoch: 11 ## Model description A fine-tuned DistilBERT base (uncased) model that detects types of toxic text. These include: "toxic", "severe_toxic", "obscene", "threat", "insult" & "identity_hate". ## Intended uses & limitations Intended to classify detected toxic text into the categories above. Trained on a small dataset in which some categories are underrepresented. ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'learning_rate': 3e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Train Precision | Train Recall | Epoch | |:----------:|:---------------:|:------------:|:-----:| | 0.0440 | 0.9059 | 0.8294 | 7 | | 0.0380 | 0.9223 | 0.8632 | 8 | | 0.0314 | 0.9335 | 0.8838 | 9 | | 0.0282 | 0.9437 | 0.9075 | 10 | | 0.0240 | 0.9522 | 0.9190 | 11 | ### Framework versions - Transformers 4.28.1 - TensorFlow 2.10.0 - Datasets 2.11.0 - Tokenizers 0.13.3
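The card gives no inference snippet. A minimal sketch, assuming the checkpoint loads with the standard TensorFlow classes under the repo id `RobCaamano/toxicity_weighted` and that the six labels above are stored in the model config (multi-label, so sigmoid rather than softmax):

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

repo = "RobCaamano/toxicity_weighted"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = TFAutoModelForSequenceClassification.from_pretrained(repo)

inputs = tokenizer("you are a horrible person", return_tensors="tf")
probs = tf.sigmoid(model(**inputs).logits)[0]  # one independent score per label
for i, p in enumerate(probs.numpy()):
    print(model.config.id2label[i], round(float(p), 3))
```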
Anirbanbhk/Hate-speech-Pretrained-movies
[ "tf", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
20
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: ratish/DBERT_Fault_v1.3 results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # ratish/DBERT_Fault_v1.3 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.2492 - Validation Loss: 0.6244 - Train Accuracy: 0.6410 - Epoch: 5 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 2128, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Train Accuracy | Epoch | |:----------:|:---------------:|:--------------:|:-----:| | 0.6707 | 0.7045 | 0.5128 | 0 | | 0.6048 | 0.6772 | 0.5897 | 1 | | 0.4599 | 0.4915 | 0.7692 | 2 | | 0.3711 | 0.4883 | 0.6923 | 3 | | 0.3174 | 0.5588 | 0.7436 | 4 | | 0.2492 | 0.6244 | 0.6410 | 5 | ### Framework versions - Transformers 4.28.1 - TensorFlow 2.12.0 - Datasets 2.12.0 - Tokenizers 0.13.3
Anji/roberta-base-squad2-finetuned-squad
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - es pipeline_tag: zero-shot-classification license: apache-2.0 tags: - "longformer" - "national library of spain" - "spanish" - "bne" datasets: - "hackathon-pln-es/nli-es" widget: - text: >- Para detener la pandemia, es importante que todos se presenten a vacunarse. candidate_labels: salud, deporte, entretenimiento ---
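The widget above maps directly onto the transformers zero-shot pipeline. A sketch (the model id is a placeholder, since this card does not name the checkpoint, and the Spanish `hypothesis_template` is an assumption):

```python
from transformers import pipeline

classifier = pipeline(
    "zero-shot-classification",
    model="<this-longformer-bne-checkpoint>",  # placeholder: the card omits the repo id
)
result = classifier(
    "Para detener la pandemia, es importante que todos se presenten a vacunarse.",
    candidate_labels=["salud", "deporte", "entretenimiento"],
    hypothesis_template="Este ejemplo es {}.",
)
print(result["labels"][0])  # expected top label: "salud"
```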
Ankitha/DialoGPT-small-harrypotter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - setfit - sentence-transformers - text-classification pipeline_tag: text-classification --- # Likang/OSS-Governance This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Usage To use this model for inference, first install the SetFit library: ```bash python -m pip install setfit ``` You can then run inference as follows: ```python from setfit import SetFitModel # Download from Hub and run inference model = SetFitModel.from_pretrained("Likang/OSS-Governance") # Run inference preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"]) ``` ## BibTeX entry and citation info ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ```
Ankitha/DialoGPT-small-harrypottery
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
null
Ann2020/distilbert-base-uncased-finetuned-ner
[ "pytorch", "tensorboard", "distilbert", "token-classification", "dataset:conll2003", "transformers", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: ratish/DBERT_Fault_v1.4 results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # ratish/DBERT_Fault_v1.4 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.0165 - Validation Loss: 1.1489 - Train Accuracy: 0.7436 - Epoch: 13 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 2128, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Train Accuracy | Epoch | |:----------:|:---------------:|:--------------:|:-----:| | 0.6677 | 0.7030 | 0.5128 | 0 | | 0.6287 | 0.6204 | 0.7436 | 1 | | 0.4746 | 0.4927 | 0.7949 | 2 | | 0.3647 | 0.5168 | 0.7692 | 3 | | 0.2682 | 0.5776 | 0.7436 | 4 | | 0.2184 | 0.4834 | 0.8205 | 5 | | 0.1997 | 0.5296 | 0.7692 | 6 | | 0.1188 | 0.6967 | 0.7949 | 7 | | 0.0945 | 0.6440 | 0.8205 | 8 | | 0.0539 | 0.6911 | 0.7949 | 9 | | 0.0271 | 0.8044 | 0.7949 | 10 | | 0.0242 | 0.7906 | 0.7949 | 11 | | 0.0264 | 0.8078 | 0.8462 | 12 | | 0.0165 | 1.1489 | 0.7436 | 13 | ### Framework versions - Transformers 4.28.1 - TensorFlow 2.12.0 - Datasets 2.12.0 - Tokenizers 0.13.3
Ann2020/rubert-base-cased-finetuned-ner
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_trainer model-index: - name: t5-MCQ-question-generator_val results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-MCQ-question-generator_val This model is a fine-tuned version of [Bilkies/t5-MCQ-question-generator_v1](https://huggingface.co/Bilkies/t5-MCQ-question-generator_v1) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 4 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
Anomic/DialoGPT-medium-loki
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_trainer model-index: - name: kaz_legal_distilbert_full_corpus_10.0 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # kaz_legal_distilbert_full_corpus_10.0 This model is a fine-tuned version of a local checkpoint (`S:\src\pipelines\distil_bert\models\3-epochs-full\model\checkpoint-11000`) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 3.9163 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 6.6228 | 0.32 | 1000 | 6.4585 | | 6.2811 | 0.64 | 2000 | 6.2602 | | 6.1313 | 0.96 | 3000 | 6.1338 | | 5.9671 | 1.28 | 4000 | 5.9722 | | 5.8529 | 1.6 | 5000 | 5.8634 | | 5.7655 | 1.92 | 6000 | 5.7714 | | 5.6925 | 2.24 | 7000 | 5.6978 | | 5.6311 | 2.56 | 8000 | 5.6411 | | 5.5994 | 2.88 | 9000 | 5.6108 | | 5.5758 | 3.2 | 10000 | 5.5260 | | 5.4439 | 3.52 | 11000 | 5.3753 | | 5.3187 | 3.84 | 12000 | 5.2224 | | 5.0263 | 5.12 | 16000 | 4.7246 | | 4.6228 | 6.4 | 20000 | 4.3538 | | 4.31 | 7.68 | 24000 | 4.0828 | | 4.1015 | 8.96 | 28000 | 3.9163 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
Anonymous/ReasonBERT-BERT
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="melobron/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
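`load_from_hub` in the snippet above is a helper from the Hugging Face Deep RL course notebooks, not part of gym or huggingface_hub. A self-contained equivalent might look like this (the `"qtable"` key is an assumption based on the course template):

```python
import pickle

import gym  # or gymnasium, depending on which version the model targets
import numpy as np
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="melobron/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl"
)
with open(path, "rb") as f:
    model = pickle.load(f)

env = gym.make(model["env_id"], is_slippery=False)
state = env.reset()  # newer gym/gymnasium returns (state, info) instead
done = False
while not done:
    action = int(np.argmax(model["qtable"][state]))  # greedy action
    state, reward, done, info = env.step(action)  # gymnasium returns a 5-tuple
```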
Anonymous/ReasonBERT-TAPAS
[ "pytorch", "tapas", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "TapasModel" ], "model_type": "tapas", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- tags: - generated_from_trainer metrics: - accuracy model-index: - name: kaz_legal_distilbert_full_corpus_10.0_6 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # kaz_legal_distilbert_full_corpus_10.0_6 This model is a fine-tuned version of [kaisar-barlybay-sse/kaz_legal_distilbert_full_corpus_10.0](https://huggingface.co/kaisar-barlybay-sse/kaz_legal_distilbert_full_corpus_10.0) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.1161 - Accuracy: 0.3812 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 6 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.4011 | 1.0 | 501 | 1.3857 | 0.2974 | | 1.3922 | 2.0 | 1002 | 1.4143 | 0.3772 | | 1.285 | 3.0 | 1503 | 1.5145 | 0.3752 | | 1.0606 | 4.0 | 2004 | 1.6871 | 0.3832 | | 0.8706 | 5.0 | 2505 | 2.0051 | 0.3852 | | 0.7318 | 6.0 | 3006 | 2.1161 | 0.3812 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
Anonymous0230/model_name
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: taxi-v3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.54 +/- 2.73 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python model = load_from_hub(repo_id="melobron/taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
AnonymousNLP/pretrained-model-1
[ "pytorch", "gpt2", "transformers" ]
null
{ "architectures": [ "GPT2DoubleHeadsModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2023-05-02T01:30:17Z
--- tags: - autotrain - summarization language: - en widget: - text: "I love AutoTrain 🤗" datasets: - Adongua/autotrain-data-test3-gam-t5 co2_eq_emissions: emissions: 1.6667833786317627 --- # Model Trained Using AutoTrain - Problem type: Summarization - Model ID: 54599127739 - CO2 Emissions (in grams): 1.6668 ## Validation Metrics - Loss: 0.678 - Rouge1: 36.735 - Rouge2: 12.751 - RougeL: 34.758 - RougeLsum: 34.868 - Gen Len: 13.670 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/Adongua/autotrain-test3-gam-t5-54599127739 ```
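The same Inference API call in Python, translating the cURL command above one-to-one (URL and payload are copied from the card; the token is a placeholder):

```python
import requests

API_URL = "https://api-inference.huggingface.co/Adongua/autotrain-test3-gam-t5-54599127739"
headers = {"Authorization": "Bearer YOUR_HUGGINGFACE_API_KEY"}

response = requests.post(API_URL, headers=headers, json={"inputs": "I love AutoTrain"})
print(response.json())
```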
AnonymousNLP/pretrained-model-2
[ "pytorch", "gpt2", "transformers" ]
null
{ "architectures": [ "GPT2DoubleHeadsModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2023-05-02T01:30:25Z
---
tags:
- chatbot
---

import nltk
import numpy as np


class Chatbot:
    """A toy retrieval/next-word chatbot over tokenized training sentences."""

    def __init__(self, train_data):
        self.train_data = train_data
        self.vocabulary = set()
        self.word_to_index = {}
        self.index_to_word = {}
        self.create_vocabulary()
        self.build_model()

    def create_vocabulary(self):
        for sentence in self.train_data:
            for word in sentence:
                self.vocabulary.add(word)
        self.vocabulary = sorted(self.vocabulary)
        self.word_to_index = {word: i for i, word in enumerate(self.vocabulary)}
        self.index_to_word = {i: word for i, word in enumerate(self.vocabulary)}

    def build_model(self):
        # Random weights: W[i, j] scores word j as the successor of word i.
        self.num_words = len(self.vocabulary)
        self.W = np.random.randn(self.num_words, self.num_words)
        self.b = np.random.randn(self.num_words)

    def predict(self, sentence):
        # Convert the sentence to indices, skipping out-of-vocabulary words.
        indices = [self.word_to_index[w] for w in sentence if w in self.word_to_index]
        if not indices:
            return ""
        # Score every candidate next word from the last word's row of W.
        # (The original np.dot(indices, self.W) only works when
        # len(indices) == num_words, so it crashed on normal sentences.)
        scores = self.W[indices[-1]] + self.b
        # Choose the word with the highest score.
        return self.index_to_word[int(np.argmax(scores))]

    def generate_text(self, start_text, max_length=100):
        # Track words as a list; the original fed the growing string back into
        # predict(), which then iterated over single characters.
        words = start_text.split()
        for _ in range(max_length):
            next_word = self.predict(words)
            if not next_word:
                break
            words.append(next_word)
        return " ".join(words)

    def respond_to(self, input_text):
        input_words = nltk.word_tokenize(input_text.lower())
        if not input_words:
            return "Say something first!"
        # Check for special commands.
        if input_words[0] == "repeat":
            return " ".join(input_words[1:])
        elif input_words[0] == "generate":
            return self.generate_text(" ".join(input_words[1:]))
        # Otherwise find the most similar training sentence ...
        similarity_scores = [
            nltk.jaccard_distance(set(sentence), set(input_words))
            for sentence in self.train_data
        ]
        most_similar_sentence = self.train_data[int(np.argmin(similarity_scores))]
        # ... and generate one successor word for each of its words.
        return " ".join(self.predict([word]) for word in most_similar_sentence).strip()


def main():
    # The corpora must be downloaded once before first use.
    nltk.download("punkt", quiet=True)
    nltk.download("reuters", quiet=True)
    # Keep the corpus small: W is num_words x num_words, so the full Reuters
    # vocabulary (~40k words) would need more than 10 GB of memory.
    train_data = nltk.corpus.reuters.sents()[:200]
    chatbot = Chatbot(train_data)
    print("Chatbot: Hi, I'm a chatbot. What can I help you with?")
    while True:
        user_input = input("User: ")
        if user_input.lower() in ["bye", "goodbye", "exit", "quit"]:
            print("Chatbot: Goodbye!")
            break
        print("Chatbot:", chatbot.respond_to(user_input))


if __name__ == "__main__":
    main()
AnonymousSub/declutr-biomed-roberta-papers
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
<table> <thead> <tr> <th>Epoch</th> <th>Training Loss</th> <th>Validation Loss</th> <th>Rouge1</th> <th>Rouge2</th> <th>Rougel</th> <th>Rougelsum</th> <th>Gen Len</th> </tr> </thead> <tr> <td>0</td> <td>0.630300</td> <td>0.412157</td> <td>0.417600</td> <td>0.263800</td> <td>0.332800</td> <td>0.406200</td> <td>794.000000</td> </tr> <tr> <td>1</td> <td>0.445600</td> <td>0.371808</td> <td>0.516700</td> <td>0.336200</td> <td>0.415500</td> <td>0.508000</td> <td>560.642900</td> </tr> <tr> <td>2</td> <td>0.398800</td> <td>0.350914</td> <td>0.562700</td> <td>0.375400</td> <td>0.443900</td> <td>0.552700</td> <td>523.714300</td> </tr> <tr> <td>4</td> <td>0.350600</td> <td>0.334888</td> <td>0.553300</td> <td>0.364900</td> <td>0.427100</td> <td>0.538800</td> <td>464.035700</td> </tr> <tr> <td>5</td> <td>0.334300</td> <td>0.326556</td> <td>0.552100</td> <td>0.361400</td> <td>0.429900</td> <td>0.540300</td> <td>517.821400</td> </tr> <tr> <td>6</td> <td>0.322300</td> <td>0.321693</td> <td>0.596600</td> <td>0.400800</td> <td>0.469400</td> <td>0.586400</td> <td>414.892900</td> </tr> <tr> <td>8</td> <td>0.308800</td> <td>0.321562</td> <td>0.594200</td> <td>0.389100</td> <td>0.458500</td> <td>0.581800</td> <td>401.357100</td> </tr> <tr> <td>8</td> <td>0.300100</td> <td>0.319800</td> <td>0.586200</td> <td>0.376100</td> <td>0.453400</td> <td>0.571500</td> <td>381.357100</td> </tr> <tr> <td>9</td> <td>0.291200</td> <td>0.319443</td> <td>0.611500</td> <td>0.399600</td> <td>0.468600</td> <td>0.597500</td> <td>368.821400</td> </tr> <tr> <td>10</td> <td>0.282900</td> <td>0.318927</td> <td>0.593200</td> <td>0.388700</td> <td>0.459100</td> <td>0.579800</td> <td>354.285700</td> </tr> <tr> <td>12</td> <td>0.273700</td> <td>0.319651</td> <td>0.594000</td> <td>0.394200</td> <td>0.457000</td> <td>0.580800</td> <td>386.785700</td> </tr> <tr> <td>12</td> <td>0.268100</td> <td>0.315178</td> <td>0.603700</td> <td>0.396100</td> <td>0.465300</td> <td>0.588500</td> <td>365.714300</td> </tr> <tr> <td>13</td> <td>0.262000</td> <td>0.312819</td> <td>0.601500</td> <td>0.402800</td> <td>0.471700</td> <td>0.586000</td> <td>377.250000</td> </tr> <tr> <td>14</td> <td>0.254900</td> <td>0.316255</td> <td>0.601200</td> <td>0.397600</td> <td>0.469700</td> <td>0.587900</td> <td>353.071400</td> </tr> <tr> <td>16</td> <td>0.248500</td> <td>0.316413</td> <td>0.610300</td> <td>0.407900</td> <td>0.476000</td> <td>0.597400</td> <td>341.464300</td> </tr> <tr> <td>16</td> <td>0.243600</td> <td>0.315982</td> <td>0.611400</td> <td>0.404900</td> <td>0.483200</td> <td>0.598300</td> <td>379.571400</td> </tr> <tr> <td>17</td> <td>0.238900</td> <td>0.318108</td> <td>0.608100</td> <td>0.408200</td> <td>0.486100</td> <td>0.594000</td> <td>375.964300</td> </tr> <tr> <td>18</td> <td>0.233900</td> <td>0.317792</td> <td>0.600200</td> <td>0.406300</td> <td>0.471700</td> <td>0.587600</td> <td>346.964300</td> </tr> <tr> <td>19</td> <td>0.229600</td> <td>0.322435</td> <td>0.599100</td> <td>0.407100</td> <td>0.479600</td> <td>0.586600</td> <td>362.571400</td> </tr> </table>
AnonymousSub/rule_based_bert_triplet_epochs_1_shard_1_wikiqa_copy
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 580.50 +/- 192.75 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib Install the RL Zoo (with SB3 and SB3-Contrib): ```bash pip install rl_zoo3 ``` ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga Felix555 -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga Felix555 -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga Felix555 ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 1000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
AnonymousSub/rule_based_roberta_bert_triplet_epochs_1_shard_1_squad2.0
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: distilbert-finetuned-lr1e-07-epochs25 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-finetuned-lr1e-07-epochs25 This model is a fine-tuned version of [distilbert-base-cased-distilled-squad](https://huggingface.co/distilbert-base-cased-distilled-squad) on the None dataset. It achieves the following results on the evaluation set: - Loss: 5.4974 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-07 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 25 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 10 | 6.3781 | | No log | 2.0 | 20 | 6.2766 | | No log | 3.0 | 30 | 6.1849 | | No log | 4.0 | 40 | 6.1020 | | No log | 5.0 | 50 | 6.0250 | | No log | 6.0 | 60 | 5.9616 | | No log | 7.0 | 70 | 5.9008 | | No log | 8.0 | 80 | 5.8449 | | No log | 9.0 | 90 | 5.7978 | | No log | 10.0 | 100 | 5.7540 | | No log | 11.0 | 110 | 5.7150 | | No log | 12.0 | 120 | 5.6789 | | No log | 13.0 | 130 | 5.6482 | | No log | 14.0 | 140 | 5.6217 | | No log | 15.0 | 150 | 5.5974 | | No log | 16.0 | 160 | 5.5775 | | No log | 17.0 | 170 | 5.5601 | | No log | 18.0 | 180 | 5.5449 | | No log | 19.0 | 190 | 5.5323 | | No log | 20.0 | 200 | 5.5215 | | No log | 21.0 | 210 | 5.5128 | | No log | 22.0 | 220 | 5.5062 | | No log | 23.0 | 230 | 5.5013 | | No log | 24.0 | 240 | 5.4983 | | No log | 25.0 | 250 | 5.4974 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
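The card stops at training curves. For an extractive-QA checkpoint like the ones in this record, inference goes through the question-answering pipeline; a sketch (the repo id below is this record's model id, used for illustration, since the card itself names no published checkpoint):

```python
from transformers import pipeline

qa = pipeline(
    "question-answering",
    model="AnonymousSub/rule_based_roberta_bert_triplet_epochs_1_shard_1_squad2.0",
)
result = qa(
    question="What do extractive QA models predict?",
    context="Extractive QA models predict a start and an end position inside the context.",
)
print(result["answer"], round(result["score"], 3))
```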
AnonymousSub/rule_based_roberta_only_classfn_twostage_epochs_1_shard_1_wikiqa
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
24
null
--- tags: - LunarLander-v2 - ppo - deep-reinforcement-learning - reinforcement-learning - custom-implementation - deep-rl-course model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 57.04 +/- 104.43 name: mean_reward verified: false --- # PPO Agent Playing LunarLander-v2 This is a trained model of a PPO agent playing LunarLander-v2. # Hyperparameters ```python {'exp_name': 'train' 'seed': 1 'torch_deterministic': True 'cuda': True 'track': False 'wandb_project_name': 'cleanRL' 'wandb_entity': None 'capture_video': False 'env_id': 'LunarLander-v2' 'total_timesteps': 1000000 'learning_rate': 0.0002 'num_envs': 16 'num_steps': 512 'anneal_lr': True 'gae': True 'gamma': 0.99 'gae_lambda': 0.95 'num_minibatches': 32 'update_epochs': 10 'norm_adv': True 'clip_coef': 0.2 'clip_vloss': True 'ent_coef': 0.01 'vf_coef': 0.5 'max_grad_norm': 0.5 'target_kl': None 'repo_id': 'dungtd2403/ppo-Lunarlander-v1-unit8' 'batch_size': 8192 'minibatch_size': 256} ```
AnonymousSub/rule_based_roberta_twostage_quadruplet_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- tags: - generated_from_trainer datasets: - sem_eval_2018_task_1 metrics: - f1 - accuracy model-index: - name: arabert-analysis results: - task: name: Text Classification type: text-classification dataset: name: sem_eval_2018_task_1 type: sem_eval_2018_task_1 config: subtask5.arabic split: validation args: subtask5.arabic metrics: - name: F1 type: f1 value: 0.6712830957230141 - name: Accuracy type: accuracy value: 0.2547008547008547 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # arabert-analysis This model is a fine-tuned version of [aubmindlab/bert-large-arabertv02](https://huggingface.co/aubmindlab/bert-large-arabertv02) on the sem_eval_2018_task_1 dataset. It achieves the following results on the evaluation set: - Loss: 0.2997 - F1: 0.6713 - Roc Auc: 0.7730 - Accuracy: 0.2547 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 20 - eval_batch_size: 20 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | Roc Auc | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:------:|:-------:|:--------:| | No log | 1.0 | 114 | 0.4142 | 0.3601 | 0.6079 | 0.0889 | | No log | 2.0 | 228 | 0.3328 | 0.6358 | 0.7492 | 0.2205 | | No log | 3.0 | 342 | 0.3123 | 0.6625 | 0.7659 | 0.2444 | | No log | 4.0 | 456 | 0.3015 | 0.6716 | 0.7724 | 0.2496 | | 0.3612 | 5.0 | 570 | 0.2997 | 0.6713 | 0.7730 | 0.2547 | ### Framework versions - Transformers 4.29.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
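For a multi-label checkpoint like this one, the text-classification pipeline should return every label with sigmoid scores rather than a softmax top-1. A sketch (the model id is a placeholder, since the card does not say where this fine-tune was pushed):

```python
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="<arabert-analysis-checkpoint>",  # placeholder: the card omits the repo id
    top_k=None,                   # return every emotion label
    function_to_apply="sigmoid",  # multi-label, so no softmax
)
for item in classifier("أنا سعيد جدا اليوم"):  # "I am very happy today"
    print(item["label"], round(item["score"], 3))
```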
AnonymousSub/rule_based_roberta_twostagetriplet_hier_epochs_1_shard_1_squad2.0
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- tags: - conversational --- # trail-2 cricket chatbot model