
Introduction

This model is a lightweight and uncased version of MiniLM [1] for the Italian language. Its 17M parameters and 67MB size make it 85% lighter than a typical mono-lingual BERT model. It is ideal when memory consumption and execution speed are critical while maintaining high-quality results.

Model description

The model takes mMiniLMv2 [1] from Microsoft ([L6xH384 mMiniLMv2](https://github.com/microsoft/unilm/tree/master/minilm)) as a starting point, focusing it on the Italian language while turning it into an uncased model by modifying the embedding layer (as in [2], but computing document-level frequencies over the Wikipedia dataset and setting a frequency threshold of 0.1%), which brings a considerable reduction in the number of parameters.

To compensate for the deletion of cased tokens, which forces the model to rely on lowercase representations of words that were previously capitalized, the model has been further pre-trained on the Italian split of the [Wikipedia](https://huggingface.co/datasets/wikipedia) dataset, using the whole word masking [3] technique to make it more robust to the new uncased representations.

The resulting model has 17M parameters, a vocabulary of 14,610 tokens, and a size of 67MB, which makes it 85% lighter than a typical mono-lingual BERT model and 75% lighter than a standard mono-lingual DistilBERT model.
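The vocabulary reduction described in [2] is essentially a frequency-based pruning of the embedding matrix. The sketch below illustrates the general idea only; the function name, the handling of special tokens, and the assumption that the tokenizer is rebuilt from the retained ids afterwards are all illustrative, not the authors' actual procedure.

```python
import torch
from collections import Counter

def prune_embeddings_by_document_frequency(tokenizer, model, documents, threshold=0.001):
    """Illustrative sketch: keep only tokens appearing in >= threshold of documents."""
    # Document-level frequency: count each token id at most once per document.
    doc_freq = Counter()
    for doc in documents:
        doc_freq.update(set(tokenizer(doc, truncation=True)["input_ids"]))

    n_docs = len(documents)
    keep = sorted(
        tid for tid in range(len(tokenizer))
        if doc_freq[tid] / n_docs >= threshold or tid in tokenizer.all_special_ids
    )

    # Slice the old embedding matrix down to the retained rows.
    old_weights = model.get_input_embeddings().weight.data
    new_embeddings = torch.nn.Embedding(len(keep), old_weights.size(1))
    new_embeddings.weight.data = old_weights[keep].clone()
    model.set_input_embeddings(new_embeddings)

    # The tokenizer (and any tied output layer) must be rebuilt to match the new ids.
    return keep
```

Dropping rarely used (mostly cased) entries in this way is what shrinks the embedding layer, which accounts for most of the parameter savings in a model this small.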

Training procedure

The model has been trained for masked language modeling on the Italian Wikipedia (~3GB) dataset for 10K steps, using the AdamW optimizer, with a batch size of 512 (obtained through 128 gradient accumulation steps), a sequence length of 512, and a linearly decaying learning rate starting from 5e-5. The training has been performed using dynamic masking between epochs and exploiting the whole word masking technique.
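As an illustration, a continued pre-training run with these hyperparameters might be set up roughly as follows with 🤗 Transformers. The checkpoint and dataset config names are assumptions for the example, and `DataCollatorForWholeWordMask` samples the masks dynamically at batch time (its exact whole-word behaviour depends on the tokenizer):

```python
from datasets import load_dataset
from transformers import (AutoModelForMaskedLM, AutoTokenizer,
                          DataCollatorForWholeWordMask, Trainer, TrainingArguments)

# Assumed starting checkpoint and corpus for the sketch.
tokenizer = AutoTokenizer.from_pretrained("osiria/flare-it")
model = AutoModelForMaskedLM.from_pretrained("osiria/flare-it")
wiki_it = load_dataset("wikipedia", "20220301.it", split="train")

def tokenize(batch):
    return tokenizer(batch["text"], truncation=True, max_length=512)

tokenized = wiki_it.map(tokenize, batched=True, remove_columns=wiki_it.column_names)

# Masks are re-sampled on the fly, so they change between epochs (dynamic masking).
collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=0.15)

args = TrainingArguments(
    output_dir="flare-it-mlm",
    max_steps=10_000,
    per_device_train_batch_size=4,      # 4 x 128 accumulation steps = 512 effective
    gradient_accumulation_steps=128,
    learning_rate=5e-5,
    lr_scheduler_type="linear",         # linear decay, as described above
)

Trainer(model=model, args=args, train_dataset=tokenized,
        data_collator=collator).train()
```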

Performances

The following metrics have been computed on the Part of Speech Tagging and Named Entity Recognition tasks, using the UD Italian ISDT and WikiNER datasets, respectively. The PoS Tagging model has been trained for 5 epochs, and the NER model for 3 epochs, both with a constant learning rate, fixed at 1e-5. For Part of Speech Tagging, the metrics have been computed on the default test set provided with the dataset, while for Named Entity Recognition the metrics have been computed with a 5-fold cross-validation.

| Task | Recall | Precision | F1 |
| ------ | ------ | ------ | ------ |
| Part of Speech Tagging | 95.64 | 95.32 | 95.45 |
| Named Entity Recognition | 82.27 | 80.64 | 81.29 |

The metrics have been computed at the token level and macro-averaged over the classes.
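Token-level macro-averaging simply means flattening the gold and predicted tags over all tokens, computing per-class precision, recall, and F1, and averaging them with equal weight per class. A minimal sketch with made-up tags (not the actual evaluation script):

```python
from sklearn.metrics import precision_recall_fscore_support

# Hypothetical flattened token-level annotations: one gold and one predicted tag per token.
gold = ["NOUN", "VERB", "DET", "NOUN", "ADP"]
pred = ["NOUN", "VERB", "DET", "ADJ", "ADP"]

precision, recall, f1, _ = precision_recall_fscore_support(
    gold, pred, average="macro", zero_division=0
)
print(f"P={precision:.2%} R={recall:.2%} F1={f1:.2%}")
```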

Demo

You can try the model online (fine-tuned on named entity recognition) using this web app: https://huggingface.co/spaces/osiria/flare-it-demo

Quick usage

```python
from transformers import AutoTokenizer, XLMRobertaForMaskedLM, pipeline

tokenizer = AutoTokenizer.from_pretrained("osiria/flare-it")
model = XLMRobertaForMaskedLM.from_pretrained("osiria/flare-it")
pipeline_mlm = pipeline(task="fill-mask", model=model, tokenizer=tokenizer)
```
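The pipeline can then be queried with a sentence containing the tokenizer's mask token. The sentence below is an arbitrary lowercase example (the model is uncased); it prints the top predictions with their scores:

```python
# Take the mask token from the tokenizer rather than hard-coding it.
sentence = f"la pizza è un piatto tipico della cucina {tokenizer.mask_token}."
for prediction in pipeline_mlm(sentence):
    print(prediction["token_str"], round(prediction["score"], 4))
```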

Limitations

This lightweight model has been further pre-trained on Wikipedia, so it is particularly suitable as an agile analyzer for large volumes of natively digital text from the web, written in a correct and fluent form (like wikis, web pages, news, etc.). However, it may show limitations when it comes to chaotic text containing errors and slang expressions (like social media posts), or to domain-specific text (like medical, financial, or legal content).

References

[1] https://arxiv.org/abs/2012.15828

[2] https://arxiv.org/abs/2010.05609

[3] https://arxiv.org/abs/1906.08101

License

\n\nThe model is released under MIT license"}}},{"rowIdx":28078,"cells":{"modelId":{"kind":"string","value":"CAMeL-Lab/bert-base-arabic-camelbert-da-pos-egy"},"tags":{"kind":"list like","value":["pytorch","tf","bert","token-classification","ar","arxiv:2103.06678","transformers","license:apache-2.0","autotrain_compatible"],"string":"[\n \"pytorch\",\n \"tf\",\n \"bert\",\n \"token-classification\",\n \"ar\",\n \"arxiv:2103.06678\",\n \"transformers\",\n \"license:apache-2.0\",\n \"autotrain_compatible\"\n]"},"pipeline_tag":{"kind":"string","value":"token-classification"},"config":{"kind":"string","value":"{\n \"architectures\": [\n \"BertForTokenClassification\"\n ],\n \"model_type\": \"bert\",\n \"task_specific_params\": {\n \"conversational\": {\n \"max_length\": null\n },\n \"summarization\": {\n \"early_stopping\": null,\n \"length_penalty\": null,\n \"max_length\": null,\n \"min_length\": null,\n \"no_repeat_ngram_size\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"text-generation\": {\n \"do_sample\": null,\n \"max_length\": null\n },\n \"translation_en_to_de\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"translation_en_to_fr\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"translation_en_to_ro\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n }\n }\n}"},"downloads":{"kind":"number","value":32,"string":"32"},"first_commit":{"kind":"null"},"card":{"kind":"string","value":"---\ninference: \n parameters: \n do_sample: true\n max_length: 384\n top_p: 0.9\n repetition_penalty: 1.0\nlanguage: \n - en\nlicense: mit\ntags: \n - \"text2text generation\"\ntask: \n name: \"lyrics interpretation\"\n type: \"text2text generation\"\nwidget: \n - \n text: \"Explain: \\nLoving him is like driving a new Maserati down a dead end street\\nFaster than the wind, passionate as sin, ending so suddenly\\nLoving him is like trying to change your mind\\nOnce you're already flying through the free fall\\nLike the colors in autumn, so bright, just before they lose it all\\n\\nLosing him was blue, like I'd never known\\nMissing him was dark gray, all alone\\nForgetting him was like trying to know\\nSomebody you never met\\nBut loving him was red\\nLoving him was red\\n\\nTouching him was like realizing all you ever wanted\\nWas right there in front of you\\nMemorizing him was as easy as knowing all the words\\nTo your old favorite song\\nFighting with him was like trying to solve a crossword\\nAnd realizing there's no right answer\\nRegretting him was like wishing you never found out\\nThat love could be that strong\\n\\nLosing him was blue, like I'd never known\\nMissing him was dark gray, all alone\\nForgetting him was like trying to know\\nSomebody you never met\\nBut loving him was red\\nOh, red\\nBurning red\\n\\nRemembering him comes in flashbacks and echoes\\nTell myself it's time now gotta let go\\nBut moving on from him is impossible\\nWhen I still see it all in my head\\nIn burning red\\nBurning, it was red\\n\\nOh, losing him was blue, like I'd never\\nnown\\nMissing him was dark gray, all alone\\nForgetting him was like trying to know\\nSomebody you never met\\n'Cause loving him was red\\nYeah, yeah, red\\nBurning red\\n\\nAnd that's why he's spinning 'round in my head\\nComes back to me, burning red\\nYeah, yeah\\nHis love was like driving a new Maserati down a dead end street\"\n example_title: \"Red - Taylor 
Swift\"\n---\n\n\n# Overview\nThis pilot hub aims to test whether a flan-t5-base can effectively automate poem interpretation. \nTo use the hub, simply paste in any poem of interest and see its meaning. Please begin your request with the prompt, 'Explain: '."}}},{"rowIdx":28079,"cells":{"modelId":{"kind":"string","value":"CAMeL-Lab/bert-base-arabic-camelbert-da-pos-msa"},"tags":{"kind":"list like","value":["pytorch","tf","bert","token-classification","ar","arxiv:2103.06678","transformers","license:apache-2.0","autotrain_compatible"],"string":"[\n \"pytorch\",\n \"tf\",\n \"bert\",\n \"token-classification\",\n \"ar\",\n \"arxiv:2103.06678\",\n \"transformers\",\n \"license:apache-2.0\",\n \"autotrain_compatible\"\n]"},"pipeline_tag":{"kind":"string","value":"token-classification"},"config":{"kind":"string","value":"{\n \"architectures\": [\n \"BertForTokenClassification\"\n ],\n \"model_type\": \"bert\",\n \"task_specific_params\": {\n \"conversational\": {\n \"max_length\": null\n },\n \"summarization\": {\n \"early_stopping\": null,\n \"length_penalty\": null,\n \"max_length\": null,\n \"min_length\": null,\n \"no_repeat_ngram_size\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"text-generation\": {\n \"do_sample\": null,\n \"max_length\": null\n },\n \"translation_en_to_de\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"translation_en_to_fr\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"translation_en_to_ro\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n }\n }\n}"},"downloads":{"kind":"number","value":27,"string":"27"},"first_commit":{"kind":"null"},"card":{"kind":"string","value":"---\ninference: \n parameters: \n do_sample: true\n max_length: 384\n top_p: 0.9\n repetition_penalty: 1.0\nlanguage: \n - en\nlicense: mit\ntags: \n - \"text2text generation\"\ntask: \n name: \"poem interpretation\"\n type: \"text2text generation\"\nwidget: \n - \n text: \"Explain: \\nThe Lost Boy\\n\\nBoy it really stinks in here\\nThe dumpster is not the place\\nTo get the food you need each day\\nJust to feed your face.\\n\\nA ten-year-old with a dirty face\\nCrawls out with his daily meal\\nWhat is he doing in this place\\nHow am I suppose to feel?\\n\\nHis mother cradles a baby \\nThe child's been dead three weeks\\nHer mind is gone from drug abuse\\nAnd now she hardly speaks.\\n\\nGrandma is a drunkard\\nWith men who come to town\\nBringing her a bottle\\nJust to go a round.\\n\\nDrugs out on the table \\nA line or two is good\\nThat should carry her over \\nNo one ever understood.\\n\\nThe little boy with dirty face\\nHas not been schooled in years\\nHe fights the streets alone\\nLong since lost his fears.\\n\\nA stale sandwich, and watered coke\\nHis meal for this day\\nWhatever tomorrow may bring\\nHe knows not the word play.\\n\\nEmaciated with distant eyes\\nNo one really sees him\\nJust one of the lost boys\\nHis life completely grim.\\n\\nGod bless the children!\\n\\n\"\n example_title: \"The Lost Boy - pattyann4500 (allpoetry.com/920731)\"\n - \n text: \"Explain: \\nLet your breath be the air I need,\\nwhen I drown in your eyes as I see.\\nLet yourself fall into my arms that bleed,\\nwhen the world shows you no mercy.\\n\\nLet your sad past bury down in the core,\\nwhen you walk with your heart close to me.\\nLet there be your lovely face at the door,\\nWhen I return from the war no matter how long it 
be.\\n\\nLet your love nourish my frozen heart,\\nwhen it lies under the snow capped tree.\\nLet me be enslaved with you forever from the start,\\nwhen the time comes, together we shall flee.\\n\\nLet your presence enlighten my dark,\\nwhen your smile reflects in the sea.\\nLet the words of love be thy spark,\\nwhen you come out of dreams to kiss me.\\n\\nI wish we were together... my princess... \\n\"\n example_title: \"Princess... - Soulhealer95 (allpoetry.com/11038949)\"\n---\n\n\n# Overview\nThe aim of this pilot hub is to test whether a Flan-T5-Base model, when pre-trained with a lyrics interpretation task, can better interpret poems.\nTo use the hub, simply paste in any poem of interest and see its meaning. Please begin your request with the prompt, 'Explain: '."}}},{"rowIdx":28080,"cells":{"modelId":{"kind":"string","value":"CAMeL-Lab/bert-base-arabic-camelbert-da"},"tags":{"kind":"list like","value":["pytorch","tf","jax","bert","fill-mask","ar","arxiv:2103.06678","transformers","license:apache-2.0","autotrain_compatible"],"string":"[\n \"pytorch\",\n \"tf\",\n \"jax\",\n \"bert\",\n \"fill-mask\",\n \"ar\",\n \"arxiv:2103.06678\",\n \"transformers\",\n \"license:apache-2.0\",\n \"autotrain_compatible\"\n]"},"pipeline_tag":{"kind":"string","value":"fill-mask"},"config":{"kind":"string","value":"{\n \"architectures\": [\n \"BertForMaskedLM\"\n ],\n \"model_type\": \"bert\",\n \"task_specific_params\": {\n \"conversational\": {\n \"max_length\": null\n },\n \"summarization\": {\n \"early_stopping\": null,\n \"length_penalty\": null,\n \"max_length\": null,\n \"min_length\": null,\n \"no_repeat_ngram_size\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"text-generation\": {\n \"do_sample\": null,\n \"max_length\": null\n },\n \"translation_en_to_de\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"translation_en_to_fr\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"translation_en_to_ro\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n }\n }\n}"},"downloads":{"kind":"number","value":449,"string":"449"},"first_commit":{"kind":"null"},"card":{"kind":"string","value":"---\nlibrary_name: stable-baselines3\ntags:\n- LunarLander-v2\n- deep-reinforcement-learning\n- reinforcement-learning\n- stable-baselines3\nmodel-index:\n- name: PPO\n results:\n - task:\n type: reinforcement-learning\n name: reinforcement-learning\n dataset:\n name: LunarLander-v2\n type: LunarLander-v2\n metrics:\n - type: mean_reward\n value: 257.28 +/- 16.62\n name: mean_reward\n verified: false\n---\n\n# **PPO** Agent playing **LunarLander-v2**\nThis is a trained model of a **PPO** agent playing **LunarLander-v2**\nusing the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).\n\n## Usage (with Stable-baselines3)\nTODO: Add your code\n\n\n```python\nfrom stable_baselines3 import ...\nfrom huggingface_sb3 import load_from_hub\n\n...\n```\n"}}},{"rowIdx":28081,"cells":{"modelId":{"kind":"string","value":"CAMeL-Lab/bert-base-arabic-camelbert-mix-did-madar-corpus6"},"tags":{"kind":"list like","value":["pytorch","tf","bert","text-classification","ar","arxiv:2103.06678","transformers","license:apache-2.0"],"string":"[\n \"pytorch\",\n \"tf\",\n \"bert\",\n \"text-classification\",\n \"ar\",\n \"arxiv:2103.06678\",\n \"transformers\",\n 
\"license:apache-2.0\"\n]"},"pipeline_tag":{"kind":"string","value":"text-classification"},"config":{"kind":"string","value":"{\n \"architectures\": [\n \"BertForSequenceClassification\"\n ],\n \"model_type\": \"bert\",\n \"task_specific_params\": {\n \"conversational\": {\n \"max_length\": null\n },\n \"summarization\": {\n \"early_stopping\": null,\n \"length_penalty\": null,\n \"max_length\": null,\n \"min_length\": null,\n \"no_repeat_ngram_size\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"text-generation\": {\n \"do_sample\": null,\n \"max_length\": null\n },\n \"translation_en_to_de\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"translation_en_to_fr\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"translation_en_to_ro\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n }\n }\n}"},"downloads":{"kind":"number","value":34,"string":"34"},"first_commit":{"kind":"null"},"card":{"kind":"string","value":"---\ndatasets:\n- logo-wizard/modern-logo-dataset\ntags:\n- text-to-image\n- lora\n- stable-diffusion\npipeline_tag: text-to-image\nlicense: creativeml-openrail-m\n---\n# LoRA text2image fine-tuning - eewwann/logo-diffusion-lora-v10\nThese are LoRA with Hadamard Product (LoHa) adaption weights for [stabilityai/stable-diffusion-2-1](https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/v2-1_768-nonema-pruned.safetensors). The weights were fine-tuned on the [logo-wizard/modern-logo-dataset](https://huggingface.co/datasets/logo-wizard/modern-logo-dataset) dataset. You can find some example images in the following. \n\n![img_0](./image_0.jpg)\n![img_1](./image_1.jpg)\n![img_2](./image_2.jpg)\n![img_3](./image_3.jpg)"}}},{"rowIdx":28082,"cells":{"modelId":{"kind":"string","value":"CAMeL-Lab/bert-base-arabic-camelbert-mix-poetry"},"tags":{"kind":"list like","value":["pytorch","tf","bert","text-classification","ar","arxiv:1905.05700","arxiv:2103.06678","transformers","license:apache-2.0"],"string":"[\n \"pytorch\",\n \"tf\",\n \"bert\",\n \"text-classification\",\n \"ar\",\n \"arxiv:1905.05700\",\n \"arxiv:2103.06678\",\n \"transformers\",\n \"license:apache-2.0\"\n]"},"pipeline_tag":{"kind":"string","value":"text-classification"},"config":{"kind":"string","value":"{\n \"architectures\": [\n \"BertForSequenceClassification\"\n ],\n \"model_type\": \"bert\",\n \"task_specific_params\": {\n \"conversational\": {\n \"max_length\": null\n },\n \"summarization\": {\n \"early_stopping\": null,\n \"length_penalty\": null,\n \"max_length\": null,\n \"min_length\": null,\n \"no_repeat_ngram_size\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"text-generation\": {\n \"do_sample\": null,\n \"max_length\": null\n },\n \"translation_en_to_de\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"translation_en_to_fr\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"translation_en_to_ro\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n }\n }\n}"},"downloads":{"kind":"number","value":31,"string":"31"},"first_commit":{"kind":"null"},"card":{"kind":"string","value":"---\ntags:\n- Taxi-v3\n- q-learning\n- reinforcement-learning\n- custom-implementation\nmodel-index:\n- name: q-Taxi-v3\n results:\n - task:\n type: reinforcement-learning\n name: 
reinforcement-learning\n dataset:\n name: Taxi-v3\n type: Taxi-v3\n metrics:\n - type: mean_reward\n value: 7.50 +/- 2.76\n name: mean_reward\n verified: false\n---\n\n # **Q-Learning** Agent playing1 **Taxi-v3**\n This is a trained model of a **Q-Learning** agent playing **Taxi-v3** .\n\n ## Usage\n\n ```python\n \n model = load_from_hub(repo_id=\"yasndr/q-Taxi-v3\", filename=\"q-learning.pkl\")\n\n # Don't forget to check if you need to add additional attributes (is_slippery=False etc)\n env = gym.make(model[\"env_id\"])\n ```\n "}}},{"rowIdx":28083,"cells":{"modelId":{"kind":"string","value":"CAMeL-Lab/bert-base-arabic-camelbert-mix-pos-msa"},"tags":{"kind":"list like","value":["pytorch","tf","bert","token-classification","ar","arxiv:2103.06678","transformers","license:apache-2.0","autotrain_compatible"],"string":"[\n \"pytorch\",\n \"tf\",\n \"bert\",\n \"token-classification\",\n \"ar\",\n \"arxiv:2103.06678\",\n \"transformers\",\n \"license:apache-2.0\",\n \"autotrain_compatible\"\n]"},"pipeline_tag":{"kind":"string","value":"token-classification"},"config":{"kind":"string","value":"{\n \"architectures\": [\n \"BertForTokenClassification\"\n ],\n \"model_type\": \"bert\",\n \"task_specific_params\": {\n \"conversational\": {\n \"max_length\": null\n },\n \"summarization\": {\n \"early_stopping\": null,\n \"length_penalty\": null,\n \"max_length\": null,\n \"min_length\": null,\n \"no_repeat_ngram_size\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"text-generation\": {\n \"do_sample\": null,\n \"max_length\": null\n },\n \"translation_en_to_de\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"translation_en_to_fr\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"translation_en_to_ro\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n }\n }\n}"},"downloads":{"kind":"number","value":1862,"string":"1,862"},"first_commit":{"kind":"null"},"card":{"kind":"string","value":"## Psychology-Alpaca-RM\n- PEFT adapter layers for a reward model based on ``decapoda-research/llama-7b-hf``. 
\n- Trained with a small subset (110 data points) of ``samhog/cgpt-pairs`` with 10K prompts, each with two answers (one 'good', one 'bad')"}}},{"rowIdx":28084,"cells":{"modelId":{"kind":"string","value":"CAMeL-Lab/bert-base-arabic-camelbert-mix"},"tags":{"kind":"list like","value":["pytorch","tf","jax","bert","fill-mask","ar","arxiv:2103.06678","transformers","Arabic","Dialect","Egyptian","Gulf","Levantine","Classical Arabic","MSA","Modern Standard Arabic","license:apache-2.0","autotrain_compatible"],"string":"[\n \"pytorch\",\n \"tf\",\n \"jax\",\n \"bert\",\n \"fill-mask\",\n \"ar\",\n \"arxiv:2103.06678\",\n \"transformers\",\n \"Arabic\",\n \"Dialect\",\n \"Egyptian\",\n \"Gulf\",\n \"Levantine\",\n \"Classical Arabic\",\n \"MSA\",\n \"Modern Standard Arabic\",\n \"license:apache-2.0\",\n \"autotrain_compatible\"\n]"},"pipeline_tag":{"kind":"string","value":"fill-mask"},"config":{"kind":"string","value":"{\n \"architectures\": [\n \"BertForMaskedLM\"\n ],\n \"model_type\": \"bert\",\n \"task_specific_params\": {\n \"conversational\": {\n \"max_length\": null\n },\n \"summarization\": {\n \"early_stopping\": null,\n \"length_penalty\": null,\n \"max_length\": null,\n \"min_length\": null,\n \"no_repeat_ngram_size\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"text-generation\": {\n \"do_sample\": null,\n \"max_length\": null\n },\n \"translation_en_to_de\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"translation_en_to_fr\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"translation_en_to_ro\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n }\n }\n}"},"downloads":{"kind":"number","value":20880,"string":"20,880"},"first_commit":{"kind":"null"},"card":{"kind":"string","value":"---\nlibrary_name: stable-baselines3\ntags:\n- LunarLander-v2\n- deep-reinforcement-learning\n- reinforcement-learning\n- stable-baselines3\nmodel-index:\n- name: PPO\n results:\n - task:\n type: reinforcement-learning\n name: reinforcement-learning\n dataset:\n name: LunarLander-v2\n type: LunarLander-v2\n metrics:\n - type: mean_reward\n value: 259.59 +/- 19.34\n name: mean_reward\n verified: false\n---\n\n# **PPO** Agent playing **LunarLander-v2**\nThis is a trained model of a **PPO** agent playing **LunarLander-v2**\nusing the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).\n\n## Usage (with Stable-baselines3)\nTODO: Add your code\n\n\n```python\nfrom stable_baselines3 import ...\nfrom huggingface_sb3 import load_from_hub\n\n...\n```\n"}}},{"rowIdx":28085,"cells":{"modelId":{"kind":"string","value":"CAMeL-Lab/bert-base-arabic-camelbert-msa-poetry"},"tags":{"kind":"list like","value":["pytorch","tf","bert","text-classification","ar","arxiv:1905.05700","arxiv:2103.06678","transformers","license:apache-2.0"],"string":"[\n \"pytorch\",\n \"tf\",\n \"bert\",\n \"text-classification\",\n \"ar\",\n \"arxiv:1905.05700\",\n \"arxiv:2103.06678\",\n \"transformers\",\n \"license:apache-2.0\"\n]"},"pipeline_tag":{"kind":"string","value":"text-classification"},"config":{"kind":"string","value":"{\n \"architectures\": [\n \"BertForSequenceClassification\"\n ],\n \"model_type\": \"bert\",\n \"task_specific_params\": {\n \"conversational\": {\n \"max_length\": null\n },\n \"summarization\": {\n \"early_stopping\": null,\n \"length_penalty\": null,\n \"max_length\": null,\n \"min_length\": null,\n \"no_repeat_ngram_size\": 
null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"text-generation\": {\n \"do_sample\": null,\n \"max_length\": null\n },\n \"translation_en_to_de\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"translation_en_to_fr\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"translation_en_to_ro\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n }\n }\n}"},"downloads":{"kind":"number","value":25,"string":"25"},"first_commit":{"kind":"null"},"card":{"kind":"string","value":"---\nlanguage: en\nthumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true\ntags:\n- huggingtweets\nwidget:\n- text: \"My dream is\"\n---\n\n
\n
\n \n
\n \n
\n \n
\n
\n
🤖 AI BOT 🤖
\n
Scratch Team
\n
@scratch
\n\n\nI was made with [huggingtweets](https://github.com/borisdayma/huggingtweets).\n\nCreate your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)!\n\n## How does it work?\n\nThe model uses the following pipeline.\n\n![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true)\n\nTo understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI).\n\n## Training data\n\nThe model was trained on tweets from Scratch Team.\n\n| Data | Scratch Team |\n| --- | --- |\n| Tweets downloaded | 3161 |\n| Retweets | 2028 |\n| Short tweets | 4 |\n| Tweets kept | 1129 |\n\n[Explore the data](https://wandb.ai/wandb/huggingtweets/runs/qnkb8q9j/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.\n\n## Training procedure\n\nThe model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @scratch's tweets.\n\nHyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/1yt6szut) for full transparency and reproducibility.\n\nAt the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/1yt6szut/artifacts) is logged and versioned.\n\n## How to use\n\nYou can use this model directly with a pipeline for text generation:\n\n```python\nfrom transformers import pipeline\ngenerator = pipeline('text-generation',\n model='huggingtweets/scratch')\ngenerator(\"My dream is\", num_return_sequences=5)\n```\n\n## Limitations and bias\n\nThe model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias).\n\nIn addition, the data present in the user's tweets further affects the text generated by the model.\n\n## About\n\n*Built by Boris Dayma*\n\n[![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma)\n\nFor more details, visit the project repository.\n\n[![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)\n"}}},{"rowIdx":28086,"cells":{"modelId":{"kind":"string","value":"CAMeL-Lab/bert-base-arabic-camelbert-msa-sixteenth"},"tags":{"kind":"list like","value":["pytorch","tf","jax","bert","fill-mask","ar","arxiv:2103.06678","transformers","license:apache-2.0","autotrain_compatible"],"string":"[\n \"pytorch\",\n \"tf\",\n \"jax\",\n \"bert\",\n \"fill-mask\",\n \"ar\",\n \"arxiv:2103.06678\",\n \"transformers\",\n \"license:apache-2.0\",\n \"autotrain_compatible\"\n]"},"pipeline_tag":{"kind":"string","value":"fill-mask"},"config":{"kind":"string","value":"{\n \"architectures\": [\n \"BertForMaskedLM\"\n ],\n \"model_type\": \"bert\",\n \"task_specific_params\": {\n \"conversational\": {\n \"max_length\": null\n },\n \"summarization\": {\n \"early_stopping\": null,\n \"length_penalty\": null,\n \"max_length\": null,\n \"min_length\": null,\n \"no_repeat_ngram_size\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"text-generation\": {\n \"do_sample\": null,\n \"max_length\": null\n },\n \"translation_en_to_de\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"translation_en_to_fr\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": 
null,\n \"prefix\": null\n },\n \"translation_en_to_ro\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n }\n }\n}"},"downloads":{"kind":"number","value":26,"string":"26"},"first_commit":{"kind":"null"},"card":{"kind":"string","value":"---\nlanguage: en\nthumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true\ntags:\n- huggingtweets\nwidget:\n- text: \"My dream is\"\n---\n\n
\n
\n \n
\n \n
\n \n \n \n
🤖 AI BOT 🤖
\n
Chris Uri
\n
@redcloudnimbus
\n\n\nI was made with [huggingtweets](https://github.com/borisdayma/huggingtweets).\n\nCreate your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)!\n\n## How does it work?\n\nThe model uses the following pipeline.\n\n![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true)\n\nTo understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI).\n\n## Training data\n\nThe model was trained on tweets from Chris Uri.\n\n| Data | Chris Uri |\n| --- | --- |\n| Tweets downloaded | 1359 |\n| Retweets | 208 |\n| Short tweets | 199 |\n| Tweets kept | 952 |\n\n[Explore the data](https://wandb.ai/wandb/huggingtweets/runs/p68z097t/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.\n\n## Training procedure\n\nThe model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @redcloudnimbus's tweets.\n\nHyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/s8pwy6bb) for full transparency and reproducibility.\n\nAt the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/s8pwy6bb/artifacts) is logged and versioned.\n\n## How to use\n\nYou can use this model directly with a pipeline for text generation:\n\n```python\nfrom transformers import pipeline\ngenerator = pipeline('text-generation',\n model='huggingtweets/redcloudnimbus')\ngenerator(\"My dream is\", num_return_sequences=5)\n```\n\n## Limitations and bias\n\nThe model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias).\n\nIn addition, the data present in the user's tweets further affects the text generated by the model.\n\n## About\n\n*Built by Boris Dayma*\n\n[![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma)\n\nFor more details, visit the project repository.\n\n[![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)\n"}}},{"rowIdx":28087,"cells":{"modelId":{"kind":"string","value":"CAUKiel/JavaBERT-uncased"},"tags":{"kind":"list like","value":["pytorch","safetensors","bert","fill-mask","java","code","transformers","license:apache-2.0","autotrain_compatible"],"string":"[\n \"pytorch\",\n \"safetensors\",\n \"bert\",\n \"fill-mask\",\n \"java\",\n \"code\",\n \"transformers\",\n \"license:apache-2.0\",\n \"autotrain_compatible\"\n]"},"pipeline_tag":{"kind":"string","value":"fill-mask"},"config":{"kind":"string","value":"{\n \"architectures\": [\n \"BertForMaskedLM\"\n ],\n \"model_type\": \"bert\",\n \"task_specific_params\": {\n \"conversational\": {\n \"max_length\": null\n },\n \"summarization\": {\n \"early_stopping\": null,\n \"length_penalty\": null,\n \"max_length\": null,\n \"min_length\": null,\n \"no_repeat_ngram_size\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"text-generation\": {\n \"do_sample\": null,\n \"max_length\": null\n },\n \"translation_en_to_de\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"translation_en_to_fr\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n 
\"translation_en_to_ro\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n }\n }\n}"},"downloads":{"kind":"number","value":7,"string":"7"},"first_commit":{"kind":"null"},"card":{"kind":"string","value":"---\nlicense: apache-2.0\ntags:\n- generated_from_trainer\nmetrics:\n- accuracy\nmodel-index:\n- name: expert-freelaw\n results: []\n---\n\n\n\n# expert-freelaw\n\nThis model is a fine-tuned version of [EleutherAI/pythia-1b-deduped](https://huggingface.co/EleutherAI/pythia-1b-deduped) on the None dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 2.0413\n- Accuracy: 0.5643\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.0001\n- train_batch_size: 1\n- eval_batch_size: 8\n- seed: 42\n- distributed_type: multi-GPU\n- num_devices: 8\n- gradient_accumulation_steps: 8\n- total_train_batch_size: 64\n- total_eval_batch_size: 64\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- training_steps: 1000\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Accuracy |\n|:-------------:|:-----:|:----:|:---------------:|:--------:|\n| 2.0772 | 0.01 | 200 | 2.0728 | 0.5588 |\n| 2.0718 | 0.01 | 400 | 2.0656 | 0.5600 |\n| 2.0661 | 0.02 | 600 | 2.0561 | 0.5617 |\n| 2.0606 | 0.03 | 800 | 2.0472 | 0.5632 |\n| 2.0514 | 0.04 | 1000 | 2.0413 | 0.5643 |\n\n\n### Framework versions\n\n- Transformers 4.28.1\n- Pytorch 2.0.0+cu117\n- Datasets 2.11.0\n- Tokenizers 0.13.3\n"}}},{"rowIdx":28088,"cells":{"modelId":{"kind":"string","value":"CAUKiel/JavaBERT"},"tags":{"kind":"list like","value":["pytorch","safetensors","bert","fill-mask","code","arxiv:2110.10404","arxiv:1910.09700","transformers","license:apache-2.0","autotrain_compatible"],"string":"[\n \"pytorch\",\n \"safetensors\",\n \"bert\",\n \"fill-mask\",\n \"code\",\n \"arxiv:2110.10404\",\n \"arxiv:1910.09700\",\n \"transformers\",\n \"license:apache-2.0\",\n \"autotrain_compatible\"\n]"},"pipeline_tag":{"kind":"string","value":"fill-mask"},"config":{"kind":"string","value":"{\n \"architectures\": [\n \"BertForMaskedLM\"\n ],\n \"model_type\": \"bert\",\n \"task_specific_params\": {\n \"conversational\": {\n \"max_length\": null\n },\n \"summarization\": {\n \"early_stopping\": null,\n \"length_penalty\": null,\n \"max_length\": null,\n \"min_length\": null,\n \"no_repeat_ngram_size\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"text-generation\": {\n \"do_sample\": null,\n \"max_length\": null\n },\n \"translation_en_to_de\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"translation_en_to_fr\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"translation_en_to_ro\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n }\n }\n}"},"downloads":{"kind":"number","value":388,"string":"388"},"first_commit":{"kind":"null"},"card":{"kind":"string","value":"---\ntags:\n- FrozenLake-v1-4x4-no_slippery\n- q-learning\n- reinforcement-learning\n- custom-implementation\nmodel-index:\n- name: q-FrozenLake-v1-4x4-noSlippery\n results:\n - task:\n type: reinforcement-learning\n name: reinforcement-learning\n 
dataset:\n name: FrozenLake-v1-4x4-no_slippery\n type: FrozenLake-v1-4x4-no_slippery\n metrics:\n - type: mean_reward\n value: 1.00 +/- 0.00\n name: mean_reward\n verified: false\n---\n\n # **Q-Learning** Agent playing1 **FrozenLake-v1**\n This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1** .\n\n ## Usage\n\n ```python\n \n model = load_from_hub(repo_id=\"JacksonBurton/q-FrozenLake-v1-4x4-noSlippery\", filename=\"q-learning.pkl\")\n\n # Don't forget to check if you need to add additional attributes (is_slippery=False etc)\n env = gym.make(model[\"env_id\"])\n ```\n "}}},{"rowIdx":28089,"cells":{"modelId":{"kind":"string","value":"Cameron/BERT-eec-emotion"},"tags":{"kind":"list like","value":["pytorch","jax","bert","text-classification","transformers"],"string":"[\n \"pytorch\",\n \"jax\",\n \"bert\",\n \"text-classification\",\n \"transformers\"\n]"},"pipeline_tag":{"kind":"string","value":"text-classification"},"config":{"kind":"string","value":"{\n \"architectures\": [\n \"BertForSequenceClassification\"\n ],\n \"model_type\": \"bert\",\n \"task_specific_params\": {\n \"conversational\": {\n \"max_length\": null\n },\n \"summarization\": {\n \"early_stopping\": null,\n \"length_penalty\": null,\n \"max_length\": null,\n \"min_length\": null,\n \"no_repeat_ngram_size\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"text-generation\": {\n \"do_sample\": null,\n \"max_length\": null\n },\n \"translation_en_to_de\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"translation_en_to_fr\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"translation_en_to_ro\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n }\n }\n}"},"downloads":{"kind":"number","value":36,"string":"36"},"first_commit":{"kind":"null"},"card":{"kind":"string","value":"---\nlicense: apache-2.0\ntags:\n- generated_from_trainer\ndatasets:\n- glue\nmetrics:\n- matthews_correlation\nmodel-index:\n- name: bert-base-uncased-finetuned-cola\n results:\n - task:\n name: Text Classification\n type: text-classification\n dataset:\n name: glue\n type: glue\n config: cola\n split: validation\n args: cola\n metrics:\n - name: Matthews Correlation\n type: matthews_correlation\n value: 0.5365007161029405\n---\n\n\n\n# bert-base-uncased-finetuned-cola\n\nThis model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the glue dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.4711\n- Matthews Correlation: 0.5365\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 9.678498850368218e-06\n- train_batch_size: 32\n- eval_batch_size: 4\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 3\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Matthews Correlation |\n|:-------------:|:-----:|:----:|:---------------:|:--------------------:|\n| No log | 1.0 | 268 | 0.4731 | 0.4664 |\n| 0.4819 | 2.0 | 536 | 0.4537 | 0.5233 |\n| 0.4819 | 3.0 | 804 | 0.4711 | 0.5365 |\n\n\n### Framework versions\n\n- Transformers 4.28.1\n- Pytorch 2.0.0+cu118\n- Datasets 
2.12.0\n- Tokenizers 0.13.3\n"}}},{"rowIdx":28090,"cells":{"modelId":{"kind":"string","value":"Cameron/BERT-mdgender-wizard"},"tags":{"kind":"list like","value":["pytorch","jax","bert","text-classification","transformers"],"string":"[\n \"pytorch\",\n \"jax\",\n \"bert\",\n \"text-classification\",\n \"transformers\"\n]"},"pipeline_tag":{"kind":"string","value":"text-classification"},"config":{"kind":"string","value":"{\n \"architectures\": [\n \"BertForSequenceClassification\"\n ],\n \"model_type\": \"bert\",\n \"task_specific_params\": {\n \"conversational\": {\n \"max_length\": null\n },\n \"summarization\": {\n \"early_stopping\": null,\n \"length_penalty\": null,\n \"max_length\": null,\n \"min_length\": null,\n \"no_repeat_ngram_size\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"text-generation\": {\n \"do_sample\": null,\n \"max_length\": null\n },\n \"translation_en_to_de\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"translation_en_to_fr\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"translation_en_to_ro\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n }\n }\n}"},"downloads":{"kind":"number","value":30,"string":"30"},"first_commit":{"kind":"null"},"card":{"kind":"string","value":"---\nlibrary_name: \"transformers.js\"\n---\n\nhttps://huggingface.co/openai/whisper-small.en with ONNX weights to be compatible with Transformers.js.\n\nNote: Having a separate repo for ONNX weights is intended to be a temporary solution until WebML gains more traction. If you would like to make your models web-ready, we recommend converting to ONNX using [🤗 Optimum](https://huggingface.co/docs/optimum/index) and structuring your repo like this one (with ONNX weights located in a subfolder named `onnx`)."}}},{"rowIdx":28091,"cells":{"modelId":{"kind":"string","value":"Canadiancaleb/DialoGPT-small-jesse"},"tags":{"kind":"list like","value":["pytorch","gpt2","text-generation","transformers","conversational"],"string":"[\n \"pytorch\",\n \"gpt2\",\n \"text-generation\",\n \"transformers\",\n \"conversational\"\n]"},"pipeline_tag":{"kind":"string","value":"conversational"},"config":{"kind":"string","value":"{\n \"architectures\": [\n \"GPT2LMHeadModel\"\n ],\n \"model_type\": \"gpt2\",\n \"task_specific_params\": {\n \"conversational\": {\n \"max_length\": 1000\n },\n \"summarization\": {\n \"early_stopping\": null,\n \"length_penalty\": null,\n \"max_length\": null,\n \"min_length\": null,\n \"no_repeat_ngram_size\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"text-generation\": {\n \"do_sample\": null,\n \"max_length\": null\n },\n \"translation_en_to_de\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"translation_en_to_fr\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"translation_en_to_ro\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n }\n }\n}"},"downloads":{"kind":"number","value":9,"string":"9"},"first_commit":{"kind":"null"},"card":{"kind":"string","value":"---\nlicense: apache-2.0\ntags:\n- summarization\n- generated_from_trainer\nmetrics:\n- rouge\nmodel-index:\n- name: mt5-small-finetuned-amazon-en-es\n results: []\n---\n\n\n\n# mt5-small-finetuned-amazon-en-es\n\nThis model is a fine-tuned version of 
[google/mt5-small](https://huggingface.co/google/mt5-small) on the None dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 3.0132\n- Rouge1: 16.4719\n- Rouge2: 7.9366\n- Rougel: 16.2123\n- Rougelsum: 16.2853\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5.6e-05\n- train_batch_size: 8\n- eval_batch_size: 8\n- seed: 42\n- distributed_type: multi-GPU\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 8\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum |\n|:-------------:|:-----:|:----:|:---------------:|:-------:|:------:|:-------:|:---------:|\n| 3.9249 | 1.0 | 1209 | 3.1904 | 15.8207 | 8.0555 | 15.4584 | 15.648 |\n| 3.5688 | 2.0 | 2418 | 3.0812 | 16.3271 | 8.1479 | 15.9001 | 16.0134 |\n| 3.3905 | 3.0 | 3627 | 3.0442 | 15.9864 | 7.295 | 15.4247 | 15.5848 |\n| 3.2728 | 4.0 | 4836 | 3.0304 | 16.2893 | 7.5851 | 15.9494 | 16.0117 |\n| 3.1958 | 5.0 | 6045 | 3.0169 | 15.4888 | 7.4495 | 15.2244 | 15.2326 |\n| 3.1359 | 6.0 | 7254 | 3.0158 | 16.3866 | 8.2218 | 16.0625 | 16.0953 |\n| 3.1059 | 7.0 | 8463 | 3.0075 | 15.9134 | 7.8387 | 15.626 | 15.6499 |\n| 3.0852 | 8.0 | 9672 | 3.0132 | 16.4719 | 7.9366 | 16.2123 | 16.2853 |\n\n\n### Framework versions\n\n- Transformers 4.26.1\n- Pytorch 1.13.1+cu117\n- Datasets 2.12.0\n- Tokenizers 0.13.2\n"}}},{"rowIdx":28092,"cells":{"modelId":{"kind":"string","value":"Canadiancaleb/jessebot"},"tags":{"kind":"list like","value":[],"string":"[]"},"pipeline_tag":{"kind":"null"},"config":{"kind":"string","value":"{\n \"architectures\": null,\n \"model_type\": null,\n \"task_specific_params\": {\n \"conversational\": {\n \"max_length\": null\n },\n \"summarization\": {\n \"early_stopping\": null,\n \"length_penalty\": null,\n \"max_length\": null,\n \"min_length\": null,\n \"no_repeat_ngram_size\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"text-generation\": {\n \"do_sample\": null,\n \"max_length\": null\n },\n \"translation_en_to_de\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"translation_en_to_fr\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n },\n \"translation_en_to_ro\": {\n \"early_stopping\": null,\n \"max_length\": null,\n \"num_beams\": null,\n \"prefix\": null\n }\n }\n}"},"downloads":{"kind":"number","value":0,"string":"0"},"first_commit":{"kind":"null"},"card":{"kind":"string","value":"---\nlicense: openrail\ntags:\n- scat\n- lora\n- stable diffusion\n---\nHere's the defecation lora, it was available on Civitai until the ban on scat content.\nYou can use various trigger words to get different effects, like \"Scat\", \"Disposal\", \"Feces\" and so on.\nThe main problem with this model is that that it tends to confuse the anus and the vagina, so you'll have to add prompts and negatives usefull to reduce this effect.\n\nYou can find my other models on Civitai: https://civitai.com/user/JollyIm/models\n\nA first example:\n![00257-695011179.png](https://s3.amazonaws.com/moonup/production/uploads/64133dd16cd62eb3ba1f8a60/NwGgNExRAgOg1UTVOU9OB.png)\nPrompts: Realistic, Realism, (Masterpiece, Best Quality, High Quality, Highres:1.4), 
Dataset columns:
- modelId: string (length 4 to 81)
- tags: list
- pipeline_tag: string (17 classes)
- config: dict
- downloads: int64 (0 to 59.7M)
- first_commit: timestamp[ns, tz=UTC]
- card: string (length 51 to 438k)
AnonymousSub/rule_based_roberta_twostagetriplet_hier_epochs_1_shard_1_wikiqa
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
23
null
Access to model suplucky/sup is restricted and you are not in the authorized list. Visit https://huggingface.co/suplucky/sup to ask for access.
AnonymousSub/unsup-consert-base_copy
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: apache-2.0 --- Model converted for use with https://github.com/NeusZimmer/ONNX-ModularUI
AnonymousSub/unsup-consert-emanuals
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### DreamStep Dreambooth model trained by grisha2000 with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Sample pictures of this concept:
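Cards in this format point to the A1111 Colab for testing, but such a checkpoint can also be loaded directly with the diffusers library. A minimal sketch follows, assuming a hypothetical repo id and instance token, since the card states neither:

```python
# Minimal sketch: loading a DreamBooth fine-tune with diffusers.
# "grisha2000/dreamstep" and the "dreamstep" prompt token are assumptions
# made for illustration; the card above does not state them.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "grisha2000/dreamstep",  # hypothetical repo id
    torch_dtype=torch.float16,
).to("cuda")

image = pipe("a photo in dreamstep style, mountain village at dusk").images[0]
image.save("dreamstep_sample.png")
```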
AnonymousSubmission/pretrained-model-1
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - generated_from_trainer metrics: - f1 model-index: - name: DIPROMATS_subtask_1_base_train results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # DIPROMATS_subtask_1_base_train This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.5120 - F1: 0.8267 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.4533 | 1.0 | 182 | 0.3471 | 0.7932 | | 0.1763 | 2.0 | 364 | 0.3473 | 0.8116 | | 0.1359 | 3.0 | 546 | 0.3887 | 0.8144 | | 0.1728 | 4.0 | 728 | 0.4311 | 0.8147 | | 0.1519 | 5.0 | 910 | 0.4881 | 0.8236 | | 0.0085 | 6.0 | 1092 | 0.5120 | 0.8267 | | 0.1828 | 7.0 | 1274 | 0.5591 | 0.8118 | | 0.0071 | 8.0 | 1456 | 0.6079 | 0.8263 | | 0.0015 | 9.0 | 1638 | 0.6919 | 0.8235 | | 0.0241 | 10.0 | 1820 | 0.6990 | 0.8221 | ### Framework versions - Transformers 4.28.1 - Pytorch 1.13.1 - Datasets 2.12.0 - Tokenizers 0.13.3
Anorak/nirvana
[ "pytorch", "pegasus", "text2text-generation", "unk", "dataset:Anorak/autonlp-data-Niravana-test2", "transformers", "autonlp", "co2_eq_emissions", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "PegasusForConditionalGeneration" ], "model_type": "pegasus", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: distilbert-finetuned-lr1e-06-epochs50 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-finetuned-lr1e-06-epochs50 This model is a fine-tuned version of [distilbert-base-cased-distilled-squad](https://huggingface.co/distilbert-base-cased-distilled-squad) on the None dataset. It achieves the following results on the evaluation set: - Loss: 3.1397 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-06 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 10 | 5.6380 | | No log | 2.0 | 20 | 5.2148 | | No log | 3.0 | 30 | 4.9729 | | No log | 4.0 | 40 | 4.8036 | | No log | 5.0 | 50 | 4.6566 | | No log | 6.0 | 60 | 4.5248 | | No log | 7.0 | 70 | 4.4054 | | No log | 8.0 | 80 | 4.2868 | | No log | 9.0 | 90 | 4.1864 | | No log | 10.0 | 100 | 4.0935 | | No log | 11.0 | 110 | 4.0126 | | No log | 12.0 | 120 | 3.9390 | | No log | 13.0 | 130 | 3.8698 | | No log | 14.0 | 140 | 3.8036 | | No log | 15.0 | 150 | 3.7400 | | No log | 16.0 | 160 | 3.6834 | | No log | 17.0 | 170 | 3.6343 | | No log | 18.0 | 180 | 3.5871 | | No log | 19.0 | 190 | 3.5456 | | No log | 20.0 | 200 | 3.5103 | | No log | 21.0 | 210 | 3.4753 | | No log | 22.0 | 220 | 3.4419 | | No log | 23.0 | 230 | 3.4087 | | No log | 24.0 | 240 | 3.3805 | | No log | 25.0 | 250 | 3.3562 | | No log | 26.0 | 260 | 3.3345 | | No log | 27.0 | 270 | 3.3151 | | No log | 28.0 | 280 | 3.2957 | | No log | 29.0 | 290 | 3.2772 | | No log | 30.0 | 300 | 3.2620 | | No log | 31.0 | 310 | 3.2497 | | No log | 32.0 | 320 | 3.2358 | | No log | 33.0 | 330 | 3.2254 | | No log | 34.0 | 340 | 3.2158 | | No log | 35.0 | 350 | 3.2057 | | No log | 36.0 | 360 | 3.1972 | | No log | 37.0 | 370 | 3.1877 | | No log | 38.0 | 380 | 3.1800 | | No log | 39.0 | 390 | 3.1722 | | No log | 40.0 | 400 | 3.1664 | | No log | 41.0 | 410 | 3.1630 | | No log | 42.0 | 420 | 3.1585 | | No log | 43.0 | 430 | 3.1538 | | No log | 44.0 | 440 | 3.1488 | | No log | 45.0 | 450 | 3.1454 | | No log | 46.0 | 460 | 3.1422 | | No log | 47.0 | 470 | 3.1414 | | No log | 48.0 | 480 | 3.1407 | | No log | 49.0 | 490 | 3.1399 | | 2.8494 | 50.0 | 500 | 3.1397 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
AnthonyNelson/DialoGPT-small-ricksanchez
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- license: cc-by-nc-sa-4.0 tags: - generated_from_trainer datasets: - klue metrics: - f1 model-index: - name: kogpt2-base-v2-finetuned-klue-ner results: - task: name: Token Classification type: token-classification dataset: name: klue type: klue config: ner split: validation args: ner metrics: - name: F1 type: f1 value: 0.37298165525403665 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # kogpt2-base-v2-finetuned-klue-ner This model is a fine-tuned version of [skt/kogpt2-base-v2](https://huggingface.co/skt/kogpt2-base-v2) on the klue dataset. It achieves the following results on the evaluation set: - Loss: 0.4076 - F1: 0.3730 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.6084 | 1.0 | 876 | 0.5353 | 0.2118 | | 0.3911 | 2.0 | 1752 | 0.4691 | 0.3041 | | 0.2855 | 3.0 | 2628 | 0.4076 | 0.3730 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
Anthos23/distilbert-base-uncased-finetuned-sst2
[ "tf", "tensorboard", "distilbert", "text-classification", "transformers", "generated_from_keras_callback", "license:apache-2.0" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
21
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - f1 model-index: - name: ec_classfication_0502_distilbert_base_uncased results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # ec_classfication_0502_distilbert_base_uncased This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.9120 - F1: 0.8222 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 15 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | No log | 1.0 | 59 | 0.6145 | 0.5753 | | No log | 2.0 | 118 | 0.5000 | 0.7619 | | No log | 3.0 | 177 | 0.5990 | 0.7 | | No log | 4.0 | 236 | 0.5030 | 0.8235 | | No log | 5.0 | 295 | 0.6379 | 0.8478 | | No log | 6.0 | 354 | 0.6739 | 0.8478 | | No log | 7.0 | 413 | 0.7597 | 0.8090 | | No log | 8.0 | 472 | 0.7854 | 0.8222 | | 0.1878 | 9.0 | 531 | 0.8594 | 0.8222 | | 0.1878 | 10.0 | 590 | 0.8947 | 0.8090 | | 0.1878 | 11.0 | 649 | 0.9086 | 0.8222 | | 0.1878 | 12.0 | 708 | 0.9130 | 0.8222 | | 0.1878 | 13.0 | 767 | 0.9070 | 0.8222 | | 0.1878 | 14.0 | 826 | 0.9117 | 0.8222 | | 0.1878 | 15.0 | 885 | 0.9120 | 0.8222 | ### Framework versions - Transformers 4.27.3 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - Tokenizers 0.13.2
Anthos23/my-awesome-model
[ "pytorch", "tf", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: distilbert-finetuned-lr1e-06-epochs10 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-finetuned-lr1e-06-epochs10 This model is a fine-tuned version of [distilbert-base-cased-distilled-squad](https://huggingface.co/distilbert-base-cased-distilled-squad) on the None dataset. It achieves the following results on the evaluation set: - Loss: 4.6132 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-06 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 10 | 5.6593 | | No log | 2.0 | 20 | 5.2648 | | No log | 3.0 | 30 | 5.0527 | | No log | 4.0 | 40 | 4.9205 | | No log | 5.0 | 50 | 4.8196 | | No log | 6.0 | 60 | 4.7436 | | No log | 7.0 | 70 | 4.6878 | | No log | 8.0 | 80 | 4.6452 | | No log | 9.0 | 90 | 4.6218 | | No log | 10.0 | 100 | 4.6132 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
ArBert/albert-base-v2-finetuned-ner-agglo
[ "pytorch", "tensorboard", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # ellabettison/blocking-model This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('ellabettison/blocking-model') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('ellabettison/blocking-model') model = AutoModel.from_pretrained('ellabettison/blocking-model') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=ellabettison/blocking-model) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 2842 with parameters: ``` {'batch_size': 64, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.SoftmaxLoss.SoftmaxLoss` Parameters of the fit()-Method: ``` { "epochs": 40, "evaluation_steps": 500, "evaluator": "sentence_transformers.evaluation.EmbeddingSimilarityEvaluator.EmbeddingSimilarityEvaluator", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 178, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
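Since the card describes the embeddings as suitable for clustering and semantic search, a short follow-up showing how two encoded records can be compared may help. This is a sketch that assumes only the sentence-transformers package and the model id given in the card:

```python
# Sketch: comparing two records with the 768-dimensional embeddings described above.
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("ellabettison/blocking-model")
embeddings = model.encode(
    ["Acme Corp, 12 Main St", "ACME Corporation, 12 Main Street"],
    convert_to_tensor=True,
)
# Cosine similarity close to 1.0 suggests the two records refer to the same entity.
print(float(util.cos_sim(embeddings[0], embeddings[1])))
```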
ArBert/bert-base-uncased-finetuned-ner-agglo
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: afl-3.0 --- Sp-bert (BERT for Scandinavian Politics) was trained on political texts coming from Parliamentary speeches in four languages: Norwegian, Swedish, Danish and Icelandic.
ArBert/bert-base-uncased-finetuned-ner-kmeans-twitter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
Access to model amogh23/autotrain-sentiment-54660127837 is restricted and you are not in the authorized list. Visit https://huggingface.co/amogh23/autotrain-sentiment-54660127837 to ask for access.
ArBert/bert-base-uncased-finetuned-ner
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- license: mit language: - en tags: - cosmology - emulator - physics - 21cmFAST --- # 21cmEMU [![PyPI](https://img.shields.io/pypi/v/py21cmemu.svg)][pypi_] [![Status](https://img.shields.io/pypi/status/py21cmemu.svg)][status] [![Python Version](https://img.shields.io/pypi/pyversions/py21cmemu)][python version] [![License](https://img.shields.io/pypi/l/py21cmemu)][license] [![Read the documentation at https://21cmEMU.readthedocs.io/](https://img.shields.io/readthedocs/py21cmEMU/latest.svg?label=Read%20the%20Docs)][read the docs] [![Tests](https://github.com/21cmFAST/21cmEMU/workflows/Tests/badge.svg)][tests] [![Codecov](https://codecov.io/gh/21cmFAST/21cmEMU/branch/main/graph/badge.svg)][codecov] [![pre-commit](https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&logoColor=white)][pre-commit] [![Black](https://img.shields.io/badge/code%20style-black-000000.svg)][black] [pypi_]: https://pypi.org/project/py21cmemu/ [status]: https://pypi.org/project/py21cmemu/ [python version]: https://pypi.org/project/py21cmemu [read the docs]: https://21cmemu.readthedocs.io/ [tests]: https://github.com/21cmFAST/21cmEMU/actions?workflow=Tests [codecov]: https://app.codecov.io/gh/21cmFAST/21cmEMU [pre-commit]: https://github.com/pre-commit/pre-commit [black]: https://github.com/psf/black ## Features - Uses Tensorflow to emulate the following summary statistics: 21-cm power spectrum, 21-cm global brightness temperature, IGM spin temperature, and neutral fraction. - Uses 21cmFAST to analytically calculate the UV luminosity functions and the Thomson optical depth to the CMB. ## Requirements - Tensorflow >= 2.6 - 21cmFAST ## Installation You can install _py21cmEMU_ via [pip] from [PyPI]: ```console $ pip install py21cmemu ``` ## Usage Please see the [Command-line Reference] for details. ## Contributing Contributions are very welcome. To learn more, see the [Contributor Guide]. ## License Distributed under the terms of the [MIT license][license], _21cmEMU_ is free and open source software. ## Issues If you encounter any problems, please [file an issue] along with a detailed description. ## Credits This project was generated from [@cjolowicz]'s [Hypermodern Python Cookiecutter] template. [@cjolowicz]: https://github.com/cjolowicz [pypi]: https://pypi.org/ [hypermodern python cookiecutter]: https://github.com/cjolowicz/cookiecutter-hypermodern-python [file an issue]: https://github.com/21cmFAST/21cmEMU/issues [pip]: https://pip.pypa.io/ <!-- github-only --> [license]: https://github.com/21cmFAST/21cmEMU/blob/main/LICENSE [contributor guide]: https://github.com/21cmFAST/21cmEMU/blob/main/CONTRIBUTING.md [command-line reference]: https://21cmEMU.readthedocs.io/en/latest/usage.html
ArBert/roberta-base-finetuned-ner-agglo-twitter
[ "pytorch", "tensorboard", "roberta", "token-classification", "transformers", "generated_from_trainer", "license:mit", "autotrain_compatible" ]
token-classification
{ "architectures": [ "RobertaForTokenClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - matthews_correlation model-index: - name: bert-base-uncased-finetuned-cola results: - task: name: Text Classification type: text-classification dataset: name: glue type: glue config: cola split: validation args: cola metrics: - name: Matthews Correlation type: matthews_correlation value: 0.6158979909555603 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-finetuned-cola This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the glue dataset. It achieves the following results on the evaluation set: - Loss: 0.6485 - Matthews Correlation: 0.6159 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1.3168255304753761e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 - max_length: 64, - dropout: 0.3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Matthews Correlation | |:-------------:|:-----:|:----:|:---------------:|:--------------------:| | 0.5039 | 1.0 | 535 | 0.4617 | 0.4879 | | 0.3299 | 2.0 | 1070 | 0.4489 | 0.5889 | | 0.2306 | 3.0 | 1605 | 0.6485 | 0.5266 | | 0.1695 | 4.0 | 2140 | 0.6485 | 0.6159 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
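The card above reports Matthews correlation as its headline metric. As a quick reference, this is how that coefficient is computed from binary predictions with scikit-learn; the labels below are illustrative, not CoLA data:

```python
# Matthews correlation coefficient, the metric reported in the card above.
from sklearn.metrics import matthews_corrcoef

y_true = [1, 0, 1, 1, 0, 1, 0, 0]  # illustrative gold acceptability labels
y_pred = [1, 0, 1, 0, 0, 1, 1, 0]  # illustrative model predictions

print(matthews_corrcoef(y_true, y_pred))  # 1.0 is perfect, 0.0 is chance level
```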
ArBert/roberta-base-finetuned-ner-gmm
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: bigscience-openrail-m --- # Automated cell nuclei segmentation and classification Models of the [tumourkit](https://github.com/Jerry-Master/lung-tumour-study) library. The key idea behind these models is illustrated by the following image. ![graph example](https://huggingface.co/Jerry-Master/Hovernet-plus-Graphs/resolve/main/examples/graph_overlay.png) The objective is to detect and classify cells of different tissues. Different models trained with tissue from different organs and stainings are provided. ## Lung (H&E) ![lung example](https://huggingface.co/Jerry-Master/Hovernet-plus-Graphs/resolve/main/examples/lung_he.png) ## Breast (HER2) ![breast example](https://huggingface.co/Jerry-Master/Hovernet-plus-Graphs/resolve/main/examples/breast_her2.png) ## Consep: Colorectal (H&E) ![consep example](https://huggingface.co/Jerry-Master/Hovernet-plus-Graphs/resolve/main/examples/colorectal_he.png) ## Monusac: Miscelaneous (H&E) ![monusac example](https://huggingface.co/Jerry-Master/Hovernet-plus-Graphs/resolve/main/examples/monusac.png) ## Model description The model is made by [Hovernet](https://github.com/vqdang/hover_net) as a backbone and a graph neural network on top to improve the classification step. Each backbone comes trained at two resolutions: 270x270 and 518x518. They also come in two version each, trained from scratch or fine-tuned from the consep checkpoint of Hovernet (FT). Then, for each Hovernet model, five graph neural networks are provided that can be used on top. Four graph convolutional neural networks trained with different sets of features and one graph attention network trained with all the features. To use the models the tumourkit library comes with a simple [demo](https://lung-tumour-study.readthedocs.io/en/latest/usage.html#gradio-demo) that you can try. Beware, on CPU it takes nearly 10 minutes per 1024x1024 image. ## Uses ### Intended use The lung models are built to estimate the percentage of tumoural cells in a given whole slide image (WSI). It is supposed to be used to accelerate histologist work and give priorities among huge amounts of WSIs to analyse. The other three models are provided for research purposes only. ### Misuse By no means these models are supposed to substitute a medical expert, and they are not built for diagnosis. Usage in any critical situation is discouraged.
ArBert/roberta-base-finetuned-ner-kmeans-twitter
[ "pytorch", "tensorboard", "roberta", "token-classification", "transformers", "generated_from_trainer", "license:mit", "autotrain_compatible" ]
token-classification
{ "architectures": [ "RobertaForTokenClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - emotion metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: name: Text Classification type: text-classification dataset: name: emotion type: emotion config: split split: validation args: split metrics: - name: Accuracy type: accuracy value: 0.923 - name: F1 type: f1 value: 0.9230596990121587 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2215 - Accuracy: 0.923 - F1: 0.9231 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.8518 | 1.0 | 250 | 0.3235 | 0.9055 | 0.9035 | | 0.2557 | 2.0 | 500 | 0.2215 | 0.923 | 0.9231 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
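A hedged inference sketch for a card like the one above; the hub id is a placeholder, because the card only shows the local run name distilbert-base-uncased-finetuned-emotion:

```python
# Sketch: running a fine-tuned emotion classifier through the transformers pipeline.
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="your-username/distilbert-base-uncased-finetuned-emotion",  # placeholder id
)
print(classifier("I am so happy with these results!"))
# expected shape: [{'label': ..., 'score': ...}]
```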
Araby/Arabic-TTS
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- datasets: - imagenet-1k library_name: transformers pipeline_tag: image-classification license: other tags: - vision - image-classification --- # MobileViTv2 (mobilevitv2-1.0-imagenet1k-256) <!-- Provide a quick summary of what the model is/does. --> MobileViTv2 is the second version of MobileViT. It was proposed in [Separable Self-attention for Mobile Vision Transformers](https://arxiv.org/abs/2206.02680) by Sachin Mehta and Mohammad Rastegari, and first released in [this](https://github.com/apple/ml-cvnets) repository. The license used is [Apple sample code license](https://github.com/apple/ml-cvnets/blob/main/LICENSE). Disclaimer: The team releasing MobileViT did not write a model card for this model so this model card has been written by the Hugging Face team. ### Model Description <!-- Provide a longer summary of what this model is. --> MobileViTv2 is constructed by replacing the multi-headed self-attention in MobileViT with separable self-attention. ### Intended uses & limitations You can use the raw model for image classification. See the [model hub](https://huggingface.co/models?search=mobilevitv2) to look for fine-tuned versions on a task that interests you. ### How to use Here is how to use this model to classify an image of the COCO 2017 dataset into one of the 1,000 ImageNet classes: ```python from transformers import MobileViTv2FeatureExtractor, MobileViTv2ForImageClassification from PIL import Image import requests url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) feature_extractor = MobileViTv2FeatureExtractor.from_pretrained("shehan97/mobilevitv2-1.0-imagenet1k-256") model = MobileViTv2ForImageClassification.from_pretrained("shehan97/mobilevitv2-1.0-imagenet1k-256") inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) logits = outputs.logits # model predicts one of the 1000 ImageNet classes predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", model.config.id2label[predicted_class_idx]) ``` Currently, both the feature extractor and model support PyTorch. ## Training data The MobileViT model was pretrained on [ImageNet-1k](https://huggingface.co/datasets/imagenet-1k), a dataset consisting of 1 million images and 1,000 classes. ### BibTeX entry and citation info ```bibtex @inproceedings{vision-transformer, title = {Separable Self-attention for Mobile Vision Transformers}, author = {Sachin Mehta and Mohammad Rastegari}, year = {2022}, URL = {https://arxiv.org/abs/2206.02680} } ```
AriakimTaiyo/DialoGPT-revised-Kumiko
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-Cartpole-v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
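The card names the REINFORCE algorithm on CartPole-v1 but defers to the course for details. A minimal sketch of the policy-gradient update follows, assuming gymnasium and PyTorch, with illustrative hyperparameters rather than those behind the reported 500.00 score:

```python
# Minimal REINFORCE sketch for CartPole-v1, the setup described in the card above.
import gymnasium as gym
import torch
from torch import nn, optim
from torch.distributions import Categorical

env = gym.make("CartPole-v1")
policy = nn.Sequential(nn.Linear(4, 64), nn.ReLU(), nn.Linear(64, 2))
optimizer = optim.Adam(policy.parameters(), lr=1e-2)

for episode in range(500):
    obs, _ = env.reset()
    log_probs, rewards = [], []
    done = False
    while not done:
        dist = Categorical(logits=policy(torch.as_tensor(obs, dtype=torch.float32)))
        action = dist.sample()
        log_probs.append(dist.log_prob(action))
        obs, reward, terminated, truncated, _ = env.step(action.item())
        rewards.append(reward)
        done = terminated or truncated

    # Discounted returns G_t, then the policy-gradient loss -sum(G_t * log pi(a_t|s_t))
    returns, g = [], 0.0
    for r in reversed(rewards):
        g = r + 0.99 * g
        returns.insert(0, g)
    returns = torch.tensor(returns)
    returns = (returns - returns.mean()) / (returns.std() + 1e-8)

    loss = -(torch.stack(log_probs) * returns).sum()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
```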
ArthurBaia/bert-base-portuguese-cased-finetuned-squad
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - autotrain - text-generation widget: - text: "I love 🤗 AutoTrain because " datasets: - huggingface/autotrain-data-z0yf-urlq-kec7 co2_eq_emissions: emissions: 0 --- # Model Trained Using AutoTrain - Problem type: Text Generation - CO2 Emissions (in grams): 0.0000 ## Validation Metrics loss: nan
AshLukass/AshLukass
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### VanGoghStyle2 Dreambooth model trained by reallylongaddress with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Sample pictures of this concept:
Ashl3y/model_name
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: CoryMagic/wikitext-distill results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # CoryMagic/wikitext-distill This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 3.3345 - Validation Loss: 3.2376 - Epoch: 3 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': 2e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Epoch | |:----------:|:---------------:|:-----:| | 3.5754 | 3.3649 | 0 | | 3.4385 | 3.3004 | 1 | | 3.3769 | 3.2633 | 2 | | 3.3345 | 3.2376 | 3 | ### Framework versions - Transformers 4.21.3 - TensorFlow 2.9.2 - Datasets 2.4.0 - Tokenizers 0.12.1
AshtonBenson/DialoGPT-small-quentin-coldwater
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - matthews_correlation model-index: - name: bert-base-uncased-finetuned-cola results: - task: name: Text Classification type: text-classification dataset: name: glue type: glue config: cola split: validation args: cola metrics: - name: Matthews Correlation type: matthews_correlation value: 0.5855730181125508 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-finetuned-cola This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the glue dataset. It achieves the following results on the evaluation set: - Loss: 0.6423 - Matthews Correlation: 0.5856 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Matthews Correlation | |:-------------:|:-----:|:----:|:---------------:|:--------------------:| | 0.4932 | 1.0 | 535 | 0.5174 | 0.5028 | | 0.2995 | 2.0 | 1070 | 0.4694 | 0.5782 | | 0.1959 | 3.0 | 1605 | 0.6423 | 0.5856 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
Aspect11/DialoGPT-Medium-LiSBot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2023-05-02T10:13:18Z
--- tags: - autotrain - text-classification language: - en widget: - text: "I love AutoTrain 🤗" datasets: - denis-gordeev/autotrain-data-insightful_keywords_2 co2_eq_emissions: emissions: 1.2479206832856347 --- # Model Trained Using AutoTrain - Problem type: Binary Classification - Model ID: 54689127880 - CO2 Emissions (in grams): 1.2479 ## Validation Metrics - Loss: 0.514 - Accuracy: 0.768 - Precision: 0.734 - Recall: 0.765 - AUC: 0.827 - F1: 0.749 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/denis-gordeev/autotrain-insightful_keywords_2-54689127880 ``` Or Python API: ``` from transformers import AutoModelForSequenceClassification, AutoTokenizer model = AutoModelForSequenceClassification.from_pretrained("denis-gordeev/autotrain-insightful_keywords_2-54689127880", use_auth_token=True) tokenizer = AutoTokenizer.from_pretrained("denis-gordeev/autotrain-insightful_keywords_2-54689127880", use_auth_token=True) inputs = tokenizer("I love AutoTrain", return_tensors="pt") outputs = model(**inputs) ```
Atchuth/DialoGPT-small-MBOT
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: afl-3.0 tags: - summarization - t5 - medical - clinical language: en datasets: - MIMIC-III widget: - again noted is the large intraparenchymal hemorrhage in the posterior right frontal lobe with extension into both lateral ventricles. the degree of surrounding edema and effacement of adjacent sulci is unchanged. there is minor contralateral shift of normal midline structures. the ventricular size is unchanged. subarachnoid blood is now seen in the left frontal and parietal lobes, likely due to recirculation of the ventricular blood. - a least two attempts were made at imaging, however, the study remains severely limited by patient motion. minimal hyperdensity tracks along a left parietal sulcus (2a:18) is equivocal for a small subarachnoid hemorhage. there is no large mass effect detected. there is no shift of normally midline structures. a minimally displaced zygomatic fracture is present (2a:9). the middle ear cavities, mastoid air cells are clear. there is extensive soft tissue swelling overlying the right frontal calvarium with swelling extending to the right preseptal soft tissues (2a:12). there is mild - moderate mucosal thickening within the ethmoid and maxillary sinuses with some fluid and fluid mucosal thickening in the sphenoid sinus. inference: parameters: max_length: 350 metrics: - rouge-l --- # Impression Section Generator for Radiology Reports 🏥 This model is the result of the participation of the SINAI team in [Task 1B: Radiology Report Summarization](https://vilmedic.app/misc/bionlp23/sharedtask) at the BioNLP workshop held at ACL 2023. The goal of this task is to foster the development of automatic radiology report summarization systems and to expand their applicability by incorporating seven different modalities and anatomies in the provided data. We propose to automate the generation of radiology impressions with "sequence-to-sequence" learning that leverages the power of publicly available pre-trained models, both general-domain and biomedical domain-specific. This repository provides access to our best-performing system, which resulted from fine-tuning [Sci-Five base](https://huggingface.co/razent/SciFive-base-Pubmed_PMC), a T5 model trained for an extra 200k steps to optimize it for biomedical literature. # Results The official evaluation results show that adapting a general-domain system to biomedical literature is beneficial for subsequent fine-tuning on the radiology report summarization task. The table below summarizes the official scores obtained by this model during the official evaluation. Team standings are available [here](https://vilmedic.app/misc/bionlp23/leaderboard/). | BLEU4 | ROUGE-L | BERTscore | F1-RadGraph | |:-----:|:-------:|:---------:|:-----------:| | 17.38 | 32.32 | 55.04 | 33.96 | # System description paper and citation The paper with a detailed description of the system will be published in the [Proceedings of the 22nd Workshop on Biomedical Language Processing](https://aclanthology.org/venues/bionlp/).
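The card above gives no inference snippet; the following is a minimal sketch assuming the checkpoint is a standard seq2seq (T5-style) model, as the Sci-Five fine-tuning implies. The repository id is a hypothetical placeholder (the card does not name it), and `max_length=350` mirrors the inference parameter declared in the card metadata.

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Hypothetical repository id -- replace with the actual id of this fine-tuned checkpoint.
repo_id = "SINAI/radiology-impression-generator"
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSeq2SeqLM.from_pretrained(repo_id)

# Findings section of a radiology report (shortened example from the card's widget).
findings = (
    "again noted is the large intraparenchymal hemorrhage in the posterior right "
    "frontal lobe with extension into both lateral ventricles."
)
inputs = tokenizer(findings, return_tensors="pt", truncation=True)
# max_length follows the inference parameter given in the card metadata.
summary_ids = model.generate(**inputs, max_length=350, num_beams=4)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```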
Ateeb/EmotionDetector
[ "pytorch", "funnel", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "FunnelForSequenceClassification" ], "model_type": "funnel", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
32
null
--- license: creativeml-openrail-m base_model: runwayml/stable-diffusion-v1-5 tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers - textual_inversion inference: true --- # Textual inversion text2image fine-tuning - LittleFlyingSheep/textual_inversion_cat These are textual inversion adaptation weights for runwayml/stable-diffusion-v1-5. You can find some example images below.
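Since the card above gives no code, here is a minimal sketch of loading these weights with the 🤗 Diffusers `load_textual_inversion` helper. The placeholder token in the prompt is an assumption; the actual token is stored in the repository's learned-embeddings file.

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Load the textual inversion weights from this repository; the learned token is
# read from the embedding file itself.
pipe.load_textual_inversion("LittleFlyingSheep/textual_inversion_cat")

# "<cat-toy>" is a guessed placeholder token -- replace it with the token actually
# stored in the repository's learned embeddings.
image = pipe("a photo of <cat-toy> sitting on a windowsill").images[0]
image.save("textual_inversion_example.png")
```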
Atlasky/Turkish-Negator
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-05-02T10:36:14Z
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-unit4test results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
Augustab/distilbert-base-uncased-finetuned-cola
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-05-02T10:39:28Z
--- tags: - autotrain - text-generation widget: - text: "I love 🤗 AutoTrain because " datasets: - huggingface/autotrain-data-5u7lo-5p6l-zjp0 co2_eq_emissions: emissions: 0 --- # Model Trained Using AutoTrain - Problem type: Text Generation - CO2 Emissions (in grams): 0.0000 ## Validation Metrics loss: nan
Augustvember/WOKKAWOKKA
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
2023-05-02T10:40:46Z
--- license: apache-2.0 tags: - setfit - sentence-transformers - text-classification pipeline_tag: text-classification --- # amittian/setfit_asoc_version_0_0_1 This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Usage To use this model for inference, first install the SetFit library: ```bash python -m pip install setfit ``` You can then run inference as follows: ```python from setfit import SetFitModel # Download from Hub and run inference model = SetFitModel.from_pretrained("amittian/setfit_asoc_version_0_0_1") # Run inference preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"]) ``` ## BibTeX entry and citation info ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ```
Augustvember/WokkaBot3
[ "conversational" ]
conversational
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: openrail datasets: - balgot/stylegan3-annotated language: - en metrics: - mse tags: - face-generation - stylegan3 library_name: pytorch --- # Text-to-StyleGAN3 Latent Space Translation This model was created as part of a project for FI:PA228 (Masaryk University), inspired by this paper: [Face Generation from Textual Features using Conditionally trained Inputs to Generative Adversarial Networks](https://arxiv.org/abs/2301.09123). It was trained on a dataset generated with BLIP and StyleGAN3. See the [corresponding notebook](https://colab.research.google.com/drive/14rDcCc0Xr1L1Ax3aKezEhmcn81vXGVQ7?usp=sharing) for further details. ## How to use: ```python import torch import torch.nn as nn # for now, the model class needs to be defined, so... class LaTran(nn.Module): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.pipe = nn.Sequential( nn.Linear(384, 512), nn.ReLU(), nn.Linear(512, 512) ) def forward(self, v): return self.pipe(v.unsqueeze(1)) # Instantiate and load the model dev = ... # device to use PATH = "translation_model-sd.pt" # local path model = LaTran().to(dev) model.load_state_dict(torch.load(PATH, map_location=dev)) ``` ## Demo For a demo of the whole pipeline, and to see how this model helps generate a final image, visit the [text-to-stylegan HF space](https://huggingface.co/spaces/balgot/text-to-stylegan3). ## Examples * Prompt: `attractive young woman, blond hair` ![image of an attractive young woman](attractive_young_woman_blonde.png) * Prompt initial: `cute young boy, blond hair, blue eyes, smiling` * Prompt second: `old man, short gray hair, glasses, wearing hat` <img src="https://huggingface.co/balgot/bert-2-stylegan3/resolve/main/young2old.gif" width="200" height="200" />
Augustvember/WokkaBot5
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-05-02T10:47:06Z
--- tags: - CartPole-v1 - ppo - deep-reinforcement-learning - reinforcement-learning - custom-implementation - deep-rl-course model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 490.80 +/- 27.60 name: mean_reward verified: false --- # PPO Agent Playing CartPole-v1 This is a trained model of a PPO agent playing CartPole-v1. # Hyperparameters ```python {'repo_id': 'sam133/ppo_cartpole_no_transformer' 'exp_name': '1.py' 'seed': 1 'torch_deterministic': True 'cuda': True 'track': False 'wandb_project_name': 'cleanRL' 'wandb_entity': None 'capture_video': False 'env_id': 'CartPole-v1' 'total_timesteps': 500000 'learning_rate': 0.00025 'num_envs': 4 'num_steps': 128 'anneal_lr': True 'gae': True 'gamma': 0.99 'gae_lambda': 0.95 'num_minibatches': 4 'update_epochs': 4 'norm_adv': True 'clip_coef': 0.2 'clip_vloss': True 'ent_coef': 0.01 'vf_coef': 0.5 'max_grad_norm': 0.5 'target_kl': None 'batch_size': 512 'minibatch_size': 128} ```
Augustvember/wokkabottest2
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-fr results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme config: PAN-X.fr split: validation args: PAN-X.fr metrics: - name: F1 type: f1 value: 0.8334173810724491 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-fr This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.2700 - F1: 0.8334 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.5754 | 1.0 | 191 | 0.3555 | 0.7842 | | 0.2623 | 2.0 | 382 | 0.2806 | 0.8180 | | 0.1744 | 3.0 | 573 | 0.2700 | 0.8334 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
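The usage sections of the card above are unfilled; a minimal NER sketch with the Transformers `pipeline` follows. The repository id is a placeholder (the card omits the namespace), and the entity labels are whatever the checkpoint's config defines (PAN-X annotates PER, ORG, and LOC).

```python
from transformers import pipeline

# Placeholder repository id -- the card does not give the namespace, so adjust as needed.
ner = pipeline(
    "token-classification",
    model="your-username/xlm-roberta-base-finetuned-panx-fr",
    aggregation_strategy="simple",
)

# French example sentence; the model was fine-tuned on the PAN-X.fr split.
print(ner("Emmanuel Macron a visité le siège de l'UNESCO à Paris."))
```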
Ayham/albert_distilgpt2_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- tags: - LunarLander-v2 - ppo - deep-reinforcement-learning - reinforcement-learning - custom-implementation - deep-rl-course model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: -80.30 +/- 49.25 name: mean_reward verified: false --- # PPO Agent Playing LunarLander-v2 This is a trained model of a PPO agent playing LunarLander-v2. # Hyperparameters
Ayham/bert_gpt2_summarization_cnndm
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: imdb_model results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # imdb_model This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.4974 - Validation Loss: 0.2063 - Train Accuracy: 0.93 - Epoch: 0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 625, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Train Accuracy | Epoch | |:----------:|:---------------:|:--------------:|:-----:| | 0.4974 | 0.2063 | 0.93 | 0 | ### Framework versions - Transformers 4.28.1 - TensorFlow 2.12.0 - Datasets 2.12.0 - Tokenizers 0.13.3
Ayham/xlnet_bert_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2023-05-02T12:16:17Z
--- license: apache-2.0 language: - zh --- # BIOC GPT
Aymene/opus-mt-en-ro-finetuned-en-to-ro
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_keras_callback model-index: - name: layoutlm-funsd-tf results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # layoutlm-funsd-tf This model is a fine-tuned version of [microsoft/layoutlm-base-uncased](https://huggingface.co/microsoft/layoutlm-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.2920 - Validation Loss: 0.6882 - Train Overall Precision: 0.7061 - Train Overall Recall: 0.7943 - Train Overall F1: 0.7476 - Train Overall Accuracy: 0.7966 - Epoch: 7 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': 3e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: mixed_float16 ### Training results | Train Loss | Validation Loss | Train Overall Precision | Train Overall Recall | Train Overall F1 | Train Overall Accuracy | Epoch | |:----------:|:---------------:|:-----------------------:|:--------------------:|:----------------:|:----------------------:|:-----:| | 1.7368 | 1.4201 | 0.2646 | 0.3051 | 0.2834 | 0.5163 | 0 | | 1.2040 | 0.9290 | 0.5253 | 0.6051 | 0.5624 | 0.7138 | 1 | | 0.8330 | 0.8307 | 0.5912 | 0.7010 | 0.6414 | 0.7294 | 2 | | 0.6119 | 0.6724 | 0.6667 | 0.7697 | 0.7145 | 0.7902 | 3 | | 0.4706 | 0.6231 | 0.6905 | 0.7883 | 0.7362 | 0.8068 | 4 | | 0.3759 | 0.6366 | 0.7203 | 0.7933 | 0.7550 | 0.8077 | 5 | | 0.3043 | 0.7168 | 0.6989 | 0.7953 | 0.7440 | 0.7937 | 6 | | 0.2920 | 0.6882 | 0.7061 | 0.7943 | 0.7476 | 0.7966 | 7 | ### Framework versions - Transformers 4.28.1 - TensorFlow 2.12.0 - Datasets 2.12.0 - Tokenizers 0.13.3
Ayoola/pytorch_model
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - emotion metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: name: Text Classification type: text-classification dataset: name: emotion type: emotion config: split split: validation args: split metrics: - name: Accuracy type: accuracy value: 0.931 - name: F1 type: f1 value: 0.9309844319832071 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2160 - Accuracy: 0.931 - F1: 0.9310 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.8342 | 1.0 | 250 | 0.3068 | 0.9115 | 0.9084 | | 0.248 | 2.0 | 500 | 0.2160 | 0.931 | 0.9310 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
Ayou/chinese_mobile_bert
[ "pytorch", "mobilebert", "fill-mask", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "MobileBertForMaskedLM" ], "model_type": "mobilebert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
16
null
--- license: openrail --- Indians are known for bringing an incredible level of energy and excitement to whatever sporting event they participate in. They have a special and amazing love for the sport of playing it, and in particular, cricket is one of the sports that they enjoy playing. Not only do the people of India have a profound fondness for this Betbook247 Exchange, but their nation is also responsible for producing some of the most gifted players in the history of cricket. People in India wager a significant amount of money on the <a href="https://getcricketidonline.com/betbook247-new-id-sign-up-register.html">Betbook247</a> Exchange, and one of the reasons for this is because of this. As a result of the expansion of cricket betting, individuals now have a wonderful opportunity to engage in the sport of cricket. They are now able to not only stay up with the most recent cricket news, but also have the possibility to make money while playing their favorite sport at the same time. The frequency of Betbook247 Exchange in India may be proved by the subsequent explanations that are offered below; these explanations are listed in order from most relevant to least relevant. buyonline cricket id https://vocal.media/gamers/how-to-use-king-exchange https://vocal.media/gamers/how-to-use-bdbetway https://vocal.media/gamers/how-to-use-fairexch9 https://vocal.media/gamers/how-to-use-lotusbook247-com-login https://vocal.media/gamers/how-to-use-matchbox9 https://vocal.media/gamers/how-to-use-ambani-book-365 https://vocal.media/gamers/how-to-use-dafabet-login https://vocal.media/gamers/how-to-use-satsport247 https://vocal.media/gamers/how-to-use-10cric10 https://vocal.media/gamers/how-to-use-abexch9 https://vocal.media/gamers/how-to-use-cricketbet9 https://vocal.media/gamers/how-to-use-doexch https://vocal.media/gamers/how-to-use-lucky7 https://vocal.media/gamers/how-to-use-tenexch https://vocal.media/gamers/how-to-use-4rabet-login https://vocal.media/gamers/how-to-use-skyinplay https://vocal.media/gamers/how-to-use-mahakal-book https://vocal.media/gamers/how-to-use-silver-exchange-id https://vocal.media/gamers/how-to-use-rajbet https://vocal.media/gamers/how-to-use-pb77-exchange https://vocal.media/gamers/how-to-use-12bet-login https://vocal.media/gamers/how-to-use-bet-star-exchange https://vocal.media/gamers/how-to-use-marvelbet-login https://vocal.media/gamers/how-to-use-jeetwin-login https://vocal.media/gamers/how-to-use-rajveerexch-login https://vocal.media/gamers/how-to-use-reddy-anna-book-login https://vocal.media/gamers/how-to-use-1win-login https://vocal.media/gamers/how-to-use-ssexch-login https://vocal.media/gamers/how-to-use-fun88-login https://vocal.media/gamers/how-to-use-pin-up-bet-login https://vocal.media/gamers/how-to-use-betbarter-login https://mohit.tistory.com/3 getcricket https://getcricket.tistory.com/2 iplbetting id https://mohit.tistory.com/2 https://getcricketid.bcz.com/2023/05/02/choose-your-bets-wisely-sign-up-for-betbhai9/ https://vocal.media/gamers/cricket-betting-phrases-jetexch9-symbol https://vocal.media/gamers/rajbet-exchange-id-provides-betting-on-the-indian-premier-league https://vocal.media/gamers/lotus365-io-accepts-bets-on-the-indian-premier-league https://vocal.media/gamers/ssexch-accepts-cricket-bets https://vocal.media/gamers/lotus-exchange-betting-id-app-allows-you-to-begin-earning-money-right-immediately https://vocal.media/gamers/yolo247-bet-allows-you-to-place-a-bet-on-any-indian-premier-league-team-of-your-choice 
https://vocal.media/gamers/skyinplay-betting-exchange-experience-cricket-betting-an-exciting-experience https://vocal.media/gamers/4rabet-ipl-betting-id-that-may-be-utilized-for-live-wagering https://vocal.media/gamers/sky247-login-is-a-useful-and-immersive-platform https://vocal.media/gamers/world777-id-login-is-a-well-known-betting-platform cricket betting https://mohit.tistory.com/4 https://vocal.media/gamers/download-the-all-cricket-id-app-for-android-to-put-bets-on-their-favorite-indian-premier-league-club https://vocal.media/gamers/online-cricket-id-login-provides-a-variety-of-betting-options https://vocal.media/gamers/online-cricket-betting-id-provides-cricket-betting https://vocal.media/gamers/online-cricket-betting-id-in-india-offers-a-reliable-and-secure-betting-environment https://vocal.media/gamers/exchange-cricket-id-will-teach-you-the-ins-and-outs-of-cricket-betting https://vocal.media/gamers/cricket-id-online-offers-single-game-betting https://vocal.media/gamers/cricket-betting-id-makes-it-simple-to-place-bets-on-cricket-matches https://vocal.media/gamers/cricket-id-online-provides-the-best-live-betting-and-in-play-action https://vocal.media/gamers/discover-the-benefits-of-ambani-book-betting-id-gambling https://vocal.media/gamers/gain-an-advantage-while-betting-on-prime-exch-betting-id onlinecricketidhindi https://mohit.tistory.com/5 https://vocal.media/gamers/get-your-goexch9-whatsapp-id-account-and-begin-betting-on-cricket-immediately https://vocal.media/gamers/getting-a-cricket-id-will-keep-you-ahead-of-the-betting-game-at-all-times https://vocal.media/gamers/get-the-dreamexch-whatsapp-number-online-on-your-device https://vocal.media/gamers/is-skyinplay-exchange-india-s-most-trustworthy-currency https://vocal.media/gamers/join-dreamexch-ipl-id-admin-in-the-cricket-madness https://vocal.media/gamers/join-dreamexch-ipl-to-take-advantage-of-the-best-welcome-incentive-available https://vocal.media/gamers/keep-outside-events-from-affecting-your-ambani-book-247-new-id-prospects https://vocal.media/gamers/join-dreamexch-register-to-take-advantage-of-the-best-welcome-incentive-available https://vocal.media/gamers/keep-up-to-date-with-dreamexch-new-id-s-real-time-score-administrator https://vocal.media/gamers/learn-how-to-read-odds-before-betting-on-ambani-book-365-sign-up
Azaghast/DistilBART-SCP-ParaSummarization
[ "pytorch", "bart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "BartForConditionalGeneration" ], "model_type": "bart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 142, "min_length": 56, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- tags: - Pixelcopter-PLE-v0 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: PixelCopter results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Pixelcopter-PLE-v0 type: Pixelcopter-PLE-v0 metrics: - type: mean_reward value: 31.80 +/- 23.68 name: mean_reward verified: false --- # **Reinforce** Agent playing **Pixelcopter-PLE-v0** This is a trained model of a **Reinforce** agent playing **Pixelcopter-PLE-v0** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
Azaghast/GPT2-SCP-ContainmentProcedures
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
2023-05-02T13:03:03Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - emotion metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: name: Text Classification type: text-classification dataset: name: emotion type: emotion config: split split: validation args: split metrics: - name: Accuracy type: accuracy value: 0.9255 - name: F1 type: f1 value: 0.925808056925967 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2186 - Accuracy: 0.9255 - F1: 0.9258 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | No log | 1.0 | 250 | 0.3109 | 0.913 | 0.9104 | | No log | 2.0 | 500 | 0.2186 | 0.9255 | 0.9258 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
Azaghast/GPT2-SCP-Descriptions
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
Access to model kobamasa/kobakoba is restricted and you are not in the authorized list. Visit https://huggingface.co/kobamasa/kobakoba to ask for access.
Azaghast/GPT2-SCP-Miscellaneous
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
# Harry Potter Chatbot This model is a chatbot designed to generate responses in the style of Harry Potter, the protagonist from J.K. Rowling's popular book series and its movie adaptations. ## Model Architecture The `harry_potter_chatbot` is based on the [`DialoGPT-medium`](https://huggingface.co/microsoft/DialoGPT-medium) model, a powerful GPT-based architecture designed for generating conversational responses. It has been fine-tuned on a dataset of Harry Potter's dialogues from movie transcripts. ## Usage You can use this model to generate responses for a given input text using the following code: ```python from transformers import AutoModelForCausalLM, AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("diabolic6045/harry_potter_chatbot") model = AutoModelForCausalLM.from_pretrained("diabolic6045/harry_potter_chatbot") input_text = "What's your favorite spell?" input_tokens = tokenizer.encode(input_text, return_tensors='pt') output_tokens = model.generate(input_tokens, max_length=50, num_return_sequences=1) output_text = tokenizer.decode(output_tokens[0], skip_special_tokens=True) print(output_text) ``` ## Limitations This model is specifically designed to generate responses in the style of Harry Potter and may not provide accurate or coherent answers to general knowledge questions. It may also sometimes generate inappropriate responses. Be cautious while using this model in a public setting or for critical applications. ## Training Data The model was fine-tuned on a dataset of Harry Potter's dialogues from movie transcripts. The dataset was collected from publicly available movie scripts and includes conversations and quotes from various Harry Potter films. ## Acknowledgments This model was trained using the Hugging Face [Transformers](https://github.com/huggingface/transformers) library, and it is based on the [`DialoGPT-medium`](https://huggingface.co/microsoft/DialoGPT-medium) model by Microsoft. Special thanks to the Hugging Face team and Microsoft for their contributions to the NLP community. --- Feel free to test the model and provide feedback or report any issues. Enjoy chatting with Harry Potter!
Azuris/DialoGPT-medium-envy
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- license: apache-2.0 tags: - setfit - sentence-transformers - text-classification pipeline_tag: text-classification --- # sheetalp91/setfit-model-1 This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Usage To use this model for inference, first install the SetFit library: ```bash python -m pip install setfit ``` You can then run inference as follows: ```python from setfit import SetFitModel # Download from Hub and run inference model = SetFitModel.from_pretrained("sheetalp91/setfit-model-1") # Run inference preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"]) ``` ## BibTeX entry and citation info ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ```
BAHIJA/distilbert-base-uncased-finetuned-cola
[ "pytorch", "tensorboard", "distilbert", "text-classification", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
36
null
--- tags: - generated_from_trainer datasets: - go_emotions metrics: - accuracy - f1 model-index: - name: goemotions_bertspanish_finetunig_d results: - task: name: Text Classification type: text-classification dataset: name: go_emotions type: go_emotions config: simplified split: test args: simplified metrics: - name: Accuracy type: accuracy value: 0.425 - name: F1 type: f1 value: 0.321340168917587 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # goemotions_bertspanish_finetunig_d This model is a fine-tuned version of [dccuchile/bert-base-spanish-wwm-cased](https://huggingface.co/dccuchile/bert-base-spanish-wwm-cased) on the go_emotions dataset. It achieves the following results on the evaluation set: - Loss: 3.2826 - Accuracy: 0.425 - F1: 0.3213 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 16 ### Training results ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
BE/demo-sentiment2021
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-CartPole8 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 500.00 +/- 0.00 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
BJTK2/model_name
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - FrozenLake-v1-4x4 - q-learning - reinforcement-learning - custom-implementation model-index: - name: doxLake-frozen results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4 type: FrozenLake-v1-4x4 metrics: - type: mean_reward value: 0.57 +/- 0.50 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="UncleanCode/doxLake-frozen", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc.) env = gym.make(model["env_id"]) ```
BOON/electra_qa
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-05-02T13:22:04Z
--- license: mit tags: - generated_from_keras_callback model-index: - name: jjdelgado/my_newsgroups_roberta_model results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # jjdelgado/my_newsgroups_roberta_model This model is a fine-tuned version of [RoBERTa-base](https://huggingface.co/RoBERTa-base) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 1.3069 - Validation Loss: 1.0260 - Train Accuracy: 0.6920 - Epoch: 0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': True, 'is_legacy_optimizer': False, 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 3535, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Train Accuracy | Epoch | |:----------:|:---------------:|:--------------:|:-----:| | 1.3069 | 1.0260 | 0.6920 | 0 | ### Framework versions - Transformers 4.28.1 - TensorFlow 2.12.0 - Datasets 2.12.0 - Tokenizers 0.13.3
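Because the model above was trained with Keras, a TensorFlow-based inference sketch may be the most direct way to try it; this assumes TF weights are hosted under the repository id given in the card and that the output indices map to the newsgroup classes used for fine-tuning.

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

repo_id = "jjdelgado/my_newsgroups_roberta_model"
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = TFAutoModelForSequenceClassification.from_pretrained(repo_id)

inputs = tokenizer(
    "NASA confirmed the launch window for the next Mars orbiter.",
    return_tensors="tf",
    truncation=True,
)
logits = model(**inputs).logits
predicted_class = int(tf.math.argmax(logits, axis=-1)[0])
print(predicted_class)  # index into the label set defined in the model config
```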
BSC-LT/roberta-base-biomedical-es
[ "pytorch", "roberta", "fill-mask", "es", "arxiv:2109.03570", "arxiv:2109.07765", "transformers", "biomedical", "spanish", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
161
null
Access to model MeanBoss/Test is restricted and you are not in the authorized list. Visit https://huggingface.co/MeanBoss/Test to ask for access.
BSen/wav2vec2-large-xls-r-300m-turkish-colab
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "dataset:common_voice", "transformers", "generated_from_trainer", "license:apache-2.0" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: openrail --- ## Thanks Big thanks to `Google` for lending us TPUv4s to train this model on. Big thanks to the Hugging Face and Diffusers teams for organising the JAX Diffusers sprint, giving support and making the JAX training scripts. Big thanks to StabilityAI for open-sourcing the Stable Diffusion model; it has made a great impact on the community! ## About the dataset To make this demo as good as possible, our team spent a lot of time training a custom model. We used the LAION5B dataset to build our custom dataset, which contains 130k images of 15 types of rooms in almost 30 design styles. After fetching all these images, we started adding metadata such as captions (from the BLIP captioning model) and segmentation maps (from the Hugging Face UperNetForSemanticSegmentation model). ## About the model This dataset was then used to train the ControlNet model to generate quality interior design images, using the segmentation maps and prompts as conditioning information for the model. By training on segmentation maps, the end user has very fine-grained control over which objects they want to place in their room. The training started from the `lllyasviel/control_v11p_sd15_seg` checkpoint, which is a robustly trained ControlNet model conditioned on segmentation maps. This checkpoint was fine-tuned on a TPUv4 with the JAX framework and afterwards converted into a PyTorch checkpoint for easy integration with the diffusers library. ## About the demo Our team made a Streamlit demo where you can test out the capabilities of this model. The resulting model is used in a community pipeline that supports image2image and inpainting, so the user can keep elements of their room and change specific parts of the image. https://huggingface.co/spaces/controlnet-interior-design/controlnet-seg
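The card above links a Streamlit demo but shows no library usage. The sketch below is a hedged starting point with the standard Diffusers ControlNet pipeline, using the base `lllyasviel/control_v11p_sd15_seg` checkpoint named in the card; swap in the fine-tuned interior-design checkpoint once its repository id is known, and supply your own segmentation map (the filename here is a placeholder).

```python
import torch
from PIL import Image
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

# The card names this as the starting checkpoint; replace it with the fine-tuned
# interior-design ControlNet when its repository id is available.
controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/control_v11p_sd15_seg", torch_dtype=torch.float16
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

# Placeholder file: a color-coded segmentation map describing the room layout.
seg_map = Image.open("room_segmentation.png")

image = pipe(
    "a bright scandinavian living room with a wooden coffee table",
    image=seg_map,
    num_inference_steps=30,
).images[0]
image.save("interior_design.png")
```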
Backedman/DialoGPT-small-Anika
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: cc-by-nc-sa-4.0 tags: - generated_from_trainer datasets: - klue metrics: - f1 model-index: - name: kogpt2-base-v2-finetuned-klue-ner results: - task: name: Token Classification type: token-classification dataset: name: klue type: klue config: ner split: validation args: ner metrics: - name: F1 type: f1 value: 0.7404764644953389 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # kogpt2-base-v2-finetuned-klue-ner This model is a fine-tuned version of [skt/kogpt2-base-v2](https://huggingface.co/skt/kogpt2-base-v2) on the klue dataset. It achieves the following results on the evaluation set: - Loss: 0.3849 - F1: 0.7405 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.2667 | 1.0 | 1313 | 0.2522 | 0.7073 | | 0.173 | 2.0 | 2626 | 0.2498 | 0.7313 | | 0.1237 | 3.0 | 3939 | 0.2660 | 0.7330 | | 0.0861 | 4.0 | 5252 | 0.3104 | 0.7423 | | 0.0592 | 5.0 | 6565 | 0.3849 | 0.7405 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
Bagus/wav2vec2-xlsr-japanese-speech-emotion-recognition
[ "pytorch", "wav2vec2", "audio-classification", "ja", "dataset:jtes", "transformers", "audio", "speech", "speech-emotion-recognition", "has_space" ]
audio-classification
{ "architectures": [ "HubertForSequenceClassification" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26
null
--- license: mit tags: - generated_from_trainer model-index: - name: donut-base-sroie results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # donut-base-sroie This model is a fine-tuned version of [naver-clova-ix/donut-base](https://huggingface.co/naver-clova-ix/donut-base) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.29.0.dev0 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
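Since the card gives no usage example, here is a hedged sketch of Donut inference with `transformers`. The base `naver-clova-ix/donut-base` id is used as a stand-in for the fine-tuned checkpoint, and the task prompt and input file are illustrative (an SROIE fine-tune may define its own task token).

```python
# Minimal Donut inference sketch. Repo id, file name and task prompt are stand-ins.
import torch
from PIL import Image
from transformers import DonutProcessor, VisionEncoderDecoderModel

processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")
model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base")

image = Image.open("receipt.png").convert("RGB")
pixel_values = processor(image, return_tensors="pt").pixel_values

# Donut is steered by a task token; "<s_cord-v2>" is only illustrative here.
decoder_input_ids = processor.tokenizer(
    "<s_cord-v2>", add_special_tokens=False, return_tensors="pt"
).input_ids

with torch.no_grad():
    outputs = model.generate(
        pixel_values, decoder_input_ids=decoder_input_ids, max_length=512
    )
sequence = processor.batch_decode(outputs)[0]
print(processor.token2json(sequence))  # parse the generated token sequence into JSON
```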
Bakkes/BakkesModWiki
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
Barbarameerr/Barbara
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: sample-factory tags: - deep-reinforcement-learning - reinforcement-learning - sample-factory model-index: - name: APPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: doom_health_gathering_supreme type: doom_health_gathering_supreme metrics: - type: mean_reward value: 11.73 +/- 5.48 name: mean_reward verified: false --- A(n) **APPO** model trained on the **doom_health_gathering_supreme** environment. This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory. Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/ ## Downloading the model After installing Sample-Factory, download the model with: ``` python -m sample_factory.huggingface.load_from_hub -r WilliamADSP/rl_course_vizdoom_health_gathering_supreme ``` ## Using the model To run the model after download, use the `enjoy` script corresponding to this environment: ``` python -m sf_examples.vizdoom.enjoy_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme ``` You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag. See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details. ## Training with this model To continue training with this model, use the `train` script corresponding to this environment: ``` python -m sf_examples.vizdoom.train_vizdoom --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000 ``` Note: you may have to adjust `--train_for_env_steps` to a suitably high number, as the experiment will resume at the number of steps it concluded at.
Barkavi/totto-t5-base-bert-score-121K
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
51
null
--- license: cc-by-nc-sa-4.0 tags: - generated_from_trainer datasets: - klue metrics: - f1 model-index: - name: kogpt2-base-v2-finetuned-klue-ner results: - task: name: Token Classification type: token-classification dataset: name: klue type: klue config: ner split: validation args: ner metrics: - name: F1 type: f1 value: 0.7679222357229647 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # kogpt2-base-v2-finetuned-klue-ner This model is a fine-tuned version of [skt/kogpt2-base-v2](https://huggingface.co/skt/kogpt2-base-v2) on the klue dataset. It achieves the following results on the evaluation set: - Loss: 0.3344 - F1: 0.7679 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.4868 | 1.0 | 876 | 0.3412 | 0.7589 | | 0.2705 | 2.0 | 1752 | 0.3255 | 0.7692 | | 0.2199 | 3.0 | 2628 | 0.3220 | 0.7560 | | 0.181 | 4.0 | 3504 | 0.3122 | 0.7815 | | 0.1409 | 5.0 | 4380 | 0.3344 | 0.7679 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
Batsy24/DialoGPT-medium-Twilight_BellaBot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - bleu - rouge model-index: - name: t5-small-codesearchnet-multilang-python-java-javascript-go results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-small-codesearchnet-multilang-python-java-javascript-go This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.5955 - Bleu: 0.009 - Rouge1: 0.2321 - Rouge2: 0.0831 - Avg Length: 16.6192 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 10 - total_train_batch_size: 80 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 15 ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | Rouge1 | Rouge2 | Avg Length | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:----------:| | No log | 1.0 | 375 | 0.7349 | 0.0028 | 0.1562 | 0.0364 | 16.436 | | 2.3117 | 2.0 | 750 | 0.6613 | 0.0066 | 0.1818 | 0.0531 | 16.824 | | 0.6755 | 3.0 | 1125 | 0.6233 | 0.007 | 0.1957 | 0.0594 | 16.931 | | 0.5998 | 4.0 | 1500 | 0.6023 | 0.0082 | 0.202 | 0.063 | 16.7154 | | 0.5998 | 5.0 | 1875 | 0.5925 | 0.0096 | 0.2154 | 0.0703 | 16.5468 | | 0.5511 | 6.0 | 2250 | 0.5728 | 0.0091 | 0.2213 | 0.0774 | 15.7216 | | 0.5147 | 7.0 | 2625 | 0.5670 | 0.0111 | 0.2311 | 0.0815 | 16.6658 | | 0.4861 | 8.0 | 3000 | 0.5628 | 0.0089 | 0.2217 | 0.077 | 17.038 | | 0.4861 | 9.0 | 3375 | 0.5598 | 0.0103 | 0.2311 | 0.0825 | 16.362 | | 0.4526 | 10.0 | 3750 | 0.5589 | 0.0083 | 0.232 | 0.086 | 15.4298 | | 0.4329 | 11.0 | 4125 | 0.5649 | 0.0098 | 0.2349 | 0.0839 | 16.5468 | | 0.4102 | 12.0 | 4500 | 0.5633 | 0.0098 | 0.2366 | 0.0867 | 16.4136 | | 0.4102 | 13.0 | 4875 | 0.5841 | 0.01 | 0.2385 | 0.0869 | 15.9864 | | 0.3841 | 14.0 | 5250 | 0.5777 | 0.0128 | 0.2437 | 0.0894 | 16.842 | | 0.3673 | 15.0 | 5625 | 0.5955 | 0.009 | 0.2321 | 0.0831 | 16.6192 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
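Given the CodeSearchNet data and the BLEU/ROUGE metrics, this looks like a code-to-text (summarization) fine-tune, so the sketch below shows typical seq2seq inference. The repository namespace is a placeholder (the card gives none) and the input snippet is illustrative.

```python
# Minimal seq2seq inference sketch; the repo id is a placeholder (no namespace in the card).
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_id = "<namespace>/t5-small-codesearchnet-multilang-python-java-javascript-go"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

code = "def add(a, b):\n    return a + b"
inputs = tokenizer(code, return_tensors="pt", truncation=True)
summary_ids = model.generate(**inputs, max_length=32, num_beams=4)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```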
BeIR/query-gen-msmarco-t5-base-v1
[ "pytorch", "jax", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
1,816
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - bleu - rouge model-index: - name: t5-small-codesearchnet-multilang-python results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-small-codesearchnet-multilang-python This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.8169 - Bleu: 0.0012 - Rouge1: 0.1986 - Rouge2: 0.0594 - Avg Length: 14.004 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 10 - total_train_batch_size: 80 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 15 ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | Rouge1 | Rouge2 | Avg Length | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:----------:| | No log | 1.0 | 375 | 0.9943 | 0.0003 | 0.1637 | 0.0365 | 13.785 | | 2.445 | 2.0 | 750 | 0.8991 | 0.0002 | 0.171 | 0.041 | 13.0266 | | 0.8324 | 3.0 | 1125 | 0.8509 | 0.001 | 0.1931 | 0.0499 | 14.9474 | | 0.7567 | 4.0 | 1500 | 0.8184 | 0.0015 | 0.2019 | 0.0561 | 14.9598 | | 0.7567 | 5.0 | 1875 | 0.8002 | 0.0016 | 0.2097 | 0.0608 | 14.496 | | 0.6947 | 6.0 | 2250 | 0.7793 | 0.0016 | 0.2138 | 0.0631 | 14.6502 | | 0.658 | 7.0 | 2625 | 0.7721 | 0.0018 | 0.2104 | 0.0617 | 15.2 | | 0.6186 | 8.0 | 3000 | 0.7669 | 0.0023 | 0.2175 | 0.0642 | 15.7472 | | 0.6186 | 9.0 | 3375 | 0.7792 | 0.0027 | 0.2218 | 0.0664 | 15.862 | | 0.58 | 10.0 | 3750 | 0.7629 | 0.0005 | 0.1985 | 0.0591 | 12.0968 | | 0.5533 | 11.0 | 4125 | 0.7826 | 0.0027 | 0.2126 | 0.0631 | 16.9146 | | 0.5279 | 12.0 | 4500 | 0.7907 | 0.0025 | 0.2144 | 0.0626 | 16.656 | | 0.5279 | 13.0 | 4875 | 0.7827 | 0.0007 | 0.2019 | 0.0606 | 12.4734 | | 0.4964 | 14.0 | 5250 | 0.7933 | 0.0023 | 0.2204 | 0.0674 | 15.344 | | 0.4803 | 15.0 | 5625 | 0.8169 | 0.0012 | 0.1986 | 0.0594 | 14.004 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
Bee-Garbs/DialoGPT-cartman-small
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-05-02T14:42:44Z
--- library_name: ml-agents tags: - SnowballTarget - deep-reinforcement-learning - reinforcement-learning - ML-Agents-SnowballTarget --- # **ppo** Agent playing **SnowballTarget** This is a trained model of a **ppo** agent playing **SnowballTarget** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-SnowballTarget 2. Find your model_id: WilliamADSP/ppo-SnowballTarget 3. Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
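Before resuming training or inspecting the agent locally, the trained files first have to be pulled from the Hub. A small sketch using `huggingface_hub` directly (rather than the ML-Agents CLI helper), with the repo id taken from the card:

```python
# Sketch: download the trained SnowballTarget agent from the Hub into ./downloads.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="WilliamADSP/ppo-SnowballTarget", local_dir="./downloads"
)
print("Model files downloaded to:", local_dir)
```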
Beelow/model
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - autotrain - text-classification language: - unk widget: - text: "I love AutoTrain 🤗" datasets: - bodik/autotrain-data-js-classification-6-cat-dist-bert-uncased co2_eq_emissions: emissions: 0.0013888828664696802 --- # Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 54424128043 - CO2 Emissions (in grams): 0.0014 ## Validation Metrics - Loss: 0.332 - Accuracy: 0.914 - Macro F1: 0.917 - Micro F1: 0.914 - Weighted F1: 0.914 - Macro Precision: 0.927 - Micro Precision: 0.914 - Weighted Precision: 0.916 - Macro Recall: 0.910 - Micro Recall: 0.914 - Weighted Recall: 0.914 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/bodik/autotrain-js-classification-6-cat-dist-bert-uncased-54424128043 ``` Or Python API: ``` from transformers import AutoModelForSequenceClassification, AutoTokenizer model = AutoModelForSequenceClassification.from_pretrained("bodik/autotrain-js-classification-6-cat-dist-bert-uncased-54424128043", use_auth_token=True) tokenizer = AutoTokenizer.from_pretrained("bodik/autotrain-js-classification-6-cat-dist-bert-uncased-54424128043", use_auth_token=True) inputs = tokenizer("I love AutoTrain", return_tensors="pt") outputs = model(**inputs) ```
Begimay/Task
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: openrail datasets: - OpenAssistant/oasst1 tags: - male --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> This modelcard aims to be a base template for new models. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md?plain=1). ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Data Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
BenDavis71/GPT-2-Finetuning-AIRaid
[ "pytorch", "jax", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
2023-05-02T14:47:58Z
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 580.00 +/- 161.71 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib Install the RL Zoo (with SB3 and SB3-Contrib): ```bash pip install rl_zoo3 ``` ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga ReadyP1 -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga ReadyP1 -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga ReadyP1 ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 1000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
BertChristiaens/EmojiPredictor
[ "pytorch", "distilbert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
2023-05-02T14:57:08Z
--- license: cc-by-4.0 datasets: - patomp/thai-mscoco-2014-captions metrics: - recall --- ## Requirements ```bash pip install pythainlp pip install "gensim>=4.3.1" pip install git+https://github.com/openai/CLIP.git ``` ## Usage Encode a text: ```python from transformers import AutoModel text = 'หมากำลังวิ่งในสนามหญ้า' model = AutoModel.from_pretrained("patomp/thai-light-multimodal-clip-and-distill", trust_remote_code=True) embeddings = model(text) print("Text features shape:", embeddings.shape) ``` Encode an image: ```python import torch import clip import requests from PIL import Image device = "cuda" if torch.cuda.is_available() else "cpu" model, preprocess = clip.load("ViT-B/32", device=device) url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) image = preprocess(image).unsqueeze(0).to(device) with torch.no_grad(): image_features = model.encode_image(image) print("Image features shape:", image_features.shape) ``` ## Benchmark On the test set of the [Thai MS COCO 2014 dataset](https://huggingface.co/datasets/patomp/thai-mscoco-2014-captions): | Model \ Metrics | text-find-image recall@1 | text-find-image recall@10 | image-find-text recall@1 | image-find-text recall@10 | # text samples per second* | | :--- | --- | --- | --- | --- | --- | | **Multilingual Encoder** | | | | | | | [clip-ViT-B-32-multilingual-v1](https://huggingface.co/sentence-transformers/clip-ViT-B-32-multilingual-v1) | 0.075 | 0.242 | 0.096 | 0.286 | 251 | | [XLM-Roberta-Large-Vit-B-32](https://huggingface.co/M-CLIP/XLM-Roberta-Large-Vit-B-32) | **0.226** | **0.565** | **0.265** | **0.596** | 20 | | **Thai Encoder (WangchanBERTa-based)** | | | | | | | [Thai-Cross-CLIP](https://github.com/vikimark/Thai-Cross-CLIP) | 0.167 | 0.475 | 0.197 | 0.523 | 48 | | **Thai Encoder (Thai2Fit-based)** | | | | | | | [thai-light-multimodal-clip-and-distill](https://huggingface.co/patomp/thai-light-multimodal-clip-and-distill) | 0.082 | **0.328** | 0.118 | **0.401** | 450 | | [thai-light-multimodal-distill](https://huggingface.co/patomp/thai-light-multimodal-distill) | **0.084** | 0.319 | **0.122** | **0.401** | 450 | ## Reference Parts of this content are adapted from https://huggingface.co/M-CLIP/XLM-Roberta-Large-Vit-B-32. For more details, please visit https://github.com/calzonelover/Lightweight-Multi-modal-Encoder-for-Thai.
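The two snippets in the card produce text and image embeddings in a shared space, and the retrieval metrics in the benchmark table come from scoring such pairs. A minimal sketch of that scoring step, assuming `embeddings` and `image_features` were obtained as in the card and have the same dimensionality:

```python
# Sketch: score a text-image pair by cosine similarity of the shared embeddings.
# Assumes `embeddings` (text) and `image_features` (image) come from the snippets above.
import torch
import torch.nn.functional as F

text_emb = F.normalize(
    torch.as_tensor(embeddings, dtype=torch.float32).reshape(1, -1).cpu(), dim=-1
)
image_emb = F.normalize(image_features.detach().float().cpu().reshape(1, -1), dim=-1)
score = (text_emb @ image_emb.T).item()
print(f"cosine similarity: {score:.3f}")
```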
Betaniaolivo/Foto
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - generated_from_trainer datasets: - funsd-layoutlmv3 model-index: - name: lilt-en-funsd results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # lilt-en-funsd This model is a fine-tuned version of [SCUT-DLVCLab/lilt-roberta-en-base](https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base) on the funsd-layoutlmv3 dataset. It achieves the following results on the evaluation set: - Loss: 1.4114 - Answer: {'precision': 0.8497175141242937, 'recall': 0.9204406364749081, 'f1': 0.8836662749706228, 'number': 817} - Header: {'precision': 0.6534653465346535, 'recall': 0.5546218487394958, 'f1': 0.6000000000000001, 'number': 119} - Question: {'precision': 0.8935018050541517, 'recall': 0.9192200557103064, 'f1': 0.9061784897025171, 'number': 1077} - Overall Precision: 0.8634 - Overall Recall: 0.8982 - Overall F1: 0.8804 - Overall Accuracy: 0.8253 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 2500 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Answer | Header | Question | Overall Precision | Overall Recall | Overall F1 | Overall Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------------------------------------------------------------------------------------------------------:|:----------------------------------------------------------------------------------------------------------:|:---------------------------------------------------------------------------------------------------------:|:-----------------:|:--------------:|:----------:|:----------------:| | 0.575 | 5.26 | 200 | 0.8531 | {'precision': 0.7890295358649789, 'recall': 0.9155446756425949, 'f1': 0.8475920679886686, 'number': 817} | {'precision': 0.5416666666666666, 'recall': 0.3277310924369748, 'f1': 0.40837696335078527, 'number': 119} | {'precision': 0.8611599297012302, 'recall': 0.9099350046425255, 'f1': 0.8848758465011286, 'number': 1077} | 0.8188 | 0.8778 | 0.8473 | 0.7926 | | 0.119 | 10.53 | 400 | 1.1026 | {'precision': 0.8278688524590164, 'recall': 0.8653610771113831, 'f1': 0.846199880311191, 'number': 817} | {'precision': 0.5813953488372093, 'recall': 0.42016806722689076, 'f1': 0.48780487804878053, 'number': 119} | {'precision': 0.856655290102389, 'recall': 0.9322191272051996, 'f1': 0.8928412627834592, 'number': 1077} | 0.8338 | 0.8748 | 0.8538 | 0.8124 | | 0.0411 | 15.79 | 600 | 1.2238 | {'precision': 0.8713942307692307, 'recall': 0.8873929008567931, 'f1': 0.8793208004851426, 'number': 817} | {'precision': 0.552, 'recall': 0.5798319327731093, 'f1': 0.5655737704918032, 'number': 119} | {'precision': 0.8669527896995708, 'recall': 0.9377901578458682, 'f1': 0.9009812667261373, 'number': 1077} | 0.8501 | 0.8962 | 0.8726 | 0.8131 | | 0.0186 | 21.05 | 800 | 1.2807 | {'precision': 0.8607888631090487, 'recall': 0.9082007343941249, 'f1': 0.8838594401429422, 'number': 817} | {'precision': 0.5447154471544715, 'recall': 0.5630252100840336, 'f1': 0.5537190082644629, 'number': 119} | {'precision': 
0.8921389396709324, 'recall': 0.9062209842154132, 'f1': 0.8991248272685398, 'number': 1077} | 0.8586 | 0.8867 | 0.8724 | 0.8162 | | 0.0098 | 26.32 | 1000 | 1.3494 | {'precision': 0.852233676975945, 'recall': 0.9106487148102815, 'f1': 0.8804733727810652, 'number': 817} | {'precision': 0.5511811023622047, 'recall': 0.5882352941176471, 'f1': 0.5691056910569106, 'number': 119} | {'precision': 0.8794964028776978, 'recall': 0.9080779944289693, 'f1': 0.8935587026039287, 'number': 1077} | 0.8485 | 0.8902 | 0.8688 | 0.8039 | | 0.0068 | 31.58 | 1200 | 1.3878 | {'precision': 0.8495475113122172, 'recall': 0.9192166462668299, 'f1': 0.8830099941211051, 'number': 817} | {'precision': 0.5565217391304348, 'recall': 0.5378151260504201, 'f1': 0.547008547008547, 'number': 119} | {'precision': 0.899624765478424, 'recall': 0.8904363974001857, 'f1': 0.8950069995333644, 'number': 1077} | 0.8591 | 0.8813 | 0.8700 | 0.8140 | | 0.0056 | 36.84 | 1400 | 1.4679 | {'precision': 0.8338833883388339, 'recall': 0.9277845777233782, 'f1': 0.8783314020857474, 'number': 817} | {'precision': 0.6442307692307693, 'recall': 0.5630252100840336, 'f1': 0.600896860986547, 'number': 119} | {'precision': 0.8971000935453695, 'recall': 0.8904363974001857, 'f1': 0.8937558247903075, 'number': 1077} | 0.8569 | 0.8862 | 0.8713 | 0.8117 | | 0.0033 | 42.11 | 1600 | 1.3959 | {'precision': 0.8463276836158192, 'recall': 0.9167686658506732, 'f1': 0.8801410105757932, 'number': 817} | {'precision': 0.5833333333333334, 'recall': 0.5882352941176471, 'f1': 0.5857740585774059, 'number': 119} | {'precision': 0.8939114391143912, 'recall': 0.8997214484679665, 'f1': 0.8968070337806571, 'number': 1077} | 0.8559 | 0.8882 | 0.8718 | 0.8177 | | 0.0013 | 47.37 | 1800 | 1.4114 | {'precision': 0.8497175141242937, 'recall': 0.9204406364749081, 'f1': 0.8836662749706228, 'number': 817} | {'precision': 0.6534653465346535, 'recall': 0.5546218487394958, 'f1': 0.6000000000000001, 'number': 119} | {'precision': 0.8935018050541517, 'recall': 0.9192200557103064, 'f1': 0.9061784897025171, 'number': 1077} | 0.8634 | 0.8982 | 0.8804 | 0.8253 | | 0.001 | 52.63 | 2000 | 1.3795 | {'precision': 0.8584795321637427, 'recall': 0.8984088127294981, 'f1': 0.8779904306220095, 'number': 817} | {'precision': 0.6306306306306306, 'recall': 0.5882352941176471, 'f1': 0.6086956521739131, 'number': 119} | {'precision': 0.8965201465201466, 'recall': 0.9090064995357474, 'f1': 0.9027201475334256, 'number': 1077} | 0.8664 | 0.8857 | 0.8760 | 0.8339 | | 0.0007 | 57.89 | 2200 | 1.4095 | {'precision': 0.8586206896551725, 'recall': 0.9143206854345165, 'f1': 0.8855957320687612, 'number': 817} | {'precision': 0.6055045871559633, 'recall': 0.5546218487394958, 'f1': 0.5789473684210525, 'number': 119} | {'precision': 0.8887884267631103, 'recall': 0.9127205199628597, 'f1': 0.9005955107650022, 'number': 1077} | 0.8614 | 0.8922 | 0.8765 | 0.8216 | | 0.0006 | 63.16 | 2400 | 1.4001 | {'precision': 0.8577981651376146, 'recall': 0.9155446756425949, 'f1': 0.8857312018946123, 'number': 817} | {'precision': 0.6216216216216216, 'recall': 0.5798319327731093, 'f1': 0.6000000000000001, 'number': 119} | {'precision': 0.895644283121597, 'recall': 0.9164345403899722, 'f1': 0.9059201468563561, 'number': 1077} | 0.8652 | 0.8962 | 0.8804 | 0.8282 | ### Framework versions - Transformers 4.28.1 - Pytorch 1.13.0+cu117 - Datasets 2.11.0 - Tokenizers 0.13.2
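As with the other auto-generated cards, no usage example is included; below is a hedged sketch of LiLT token-classification inference. The repository namespace is a placeholder, and the words and boxes are toy values standing in for OCR output (LiLT expects boxes normalized to a 0-1000 range).

```python
# Minimal LiLT inference sketch; repo id, words and boxes are illustrative placeholders.
import torch
from transformers import AutoTokenizer, AutoModelForTokenClassification

model_id = "<namespace>/lilt-en-funsd"
# add_prefix_space=True is needed for RoBERTa-style tokenizers with pre-tokenized input.
tokenizer = AutoTokenizer.from_pretrained(model_id, add_prefix_space=True)
model = AutoModelForTokenClassification.from_pretrained(model_id)

words = ["Invoice", "No:", "12345"]
boxes = [[60, 40, 180, 60], [190, 40, 230, 60], [240, 40, 320, 60]]  # 0-1000 normalized

encoding = tokenizer(words, is_split_into_words=True, return_tensors="pt")
# Expand word-level boxes to token level (special tokens get a zero box).
token_boxes = [
    boxes[idx] if idx is not None else [0, 0, 0, 0] for idx in encoding.word_ids()
]
encoding["bbox"] = torch.tensor([token_boxes])

with torch.no_grad():
    logits = model(**encoding).logits
predictions = logits.argmax(-1).squeeze().tolist()
print([model.config.id2label[p] for p in predictions])
```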
Bharathdamu/wav2vec2-large-xls-r-300m-hindi-colab
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "dataset:common_voice", "transformers", "generated_from_trainer", "license:apache-2.0" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- tags: - generated_from_keras_callback model-index: - name: Bert_class_1e-10 results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # Bert_class_1e-10 This model is a fine-tuned version of [guoluo/Bert_1.5e_07](https://huggingface.co/guoluo/Bert_1.5e_07) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 1.4794 - Train Accuracy: 0.1435 - Validation Loss: 1.4962 - Validation Accuracy: 0.1338 - Train Lr: 9.999547e-11 - Epoch: 999 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'learning_rate': 9.999547e-11, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Train Accuracy | Validation Loss | Validation Accuracy | Train Lr | Epoch | |:----------:|:--------------:|:---------------:|:-------------------:|:-------------:|:-----:| | 1.4732 | 0.1671 | 1.5014 | 0.1338 | 1e-10 | 0 | | 1.4751 | 0.1412 | 1.5014 | 0.1338 | 1e-10 | 1 | | 1.4792 | 0.1388 | 1.5014 | 0.1338 | 1e-10 | 2 | | 1.4789 | 0.1388 | 1.5014 | 0.1338 | 1e-10 | 3 | | 1.4755 | 0.1482 | 1.5014 | 0.1338 | 1e-10 | 4 | | 1.4702 | 0.1482 | 1.5014 | 0.1338 | 1e-10 | 5 | | 1.4800 | 0.1388 | 1.5014 | 0.1338 | 1e-10 | 6 | | 1.4739 | 0.1576 | 1.5014 | 0.1338 | 1e-10 | 7 | | 1.4831 | 0.1435 | 1.5014 | 0.1338 | 1e-10 | 8 | | 1.4740 | 0.1459 | 1.5014 | 0.1338 | 1e-10 | 9 | | 1.4762 | 0.1482 | 1.5014 | 0.1338 | 1e-10 | 10 | | 1.4754 | 0.1388 | 1.5014 | 0.1338 | 1e-10 | 11 | | 1.4683 | 0.1506 | 1.5014 | 0.1338 | 1e-10 | 12 | | 1.4787 | 0.1553 | 1.5014 | 0.1338 | 1e-10 | 13 | | 1.4770 | 0.1388 | 1.5014 | 0.1338 | 1e-10 | 14 | | 1.4790 | 0.1388 | 1.5013 | 0.1338 | 1e-10 | 15 | | 1.4799 | 0.1388 | 1.5013 | 0.1338 | 1e-10 | 16 | | 1.4828 | 0.1388 | 1.5013 | 0.1338 | 1e-10 | 17 | | 1.4780 | 0.1412 | 1.5013 | 0.1338 | 1e-10 | 18 | | 1.4826 | 0.1271 | 1.5013 | 0.1338 | 1e-10 | 19 | | 1.4770 | 0.1365 | 1.5013 | 0.1338 | 1e-10 | 20 | | 1.4747 | 0.1388 | 1.5013 | 0.1338 | 1e-10 | 21 | | 1.4783 | 0.1482 | 1.5013 | 0.1338 | 1e-10 | 22 | | 1.4780 | 0.1506 | 1.5013 | 0.1338 | 1e-10 | 23 | | 1.4748 | 0.1388 | 1.5013 | 0.1338 | 1e-10 | 24 | | 1.4776 | 0.1553 | 1.5013 | 0.1338 | 1e-10 | 25 | | 1.4813 | 0.1459 | 1.5013 | 0.1338 | 1e-10 | 26 | | 1.4819 | 0.1412 | 1.5013 | 0.1338 | 1e-10 | 27 | | 1.4756 | 0.1435 | 1.5013 | 0.1338 | 1e-10 | 28 | | 1.4810 | 0.1435 | 1.5013 | 0.1338 | 1e-10 | 29 | | 1.4745 | 0.1529 | 1.5013 | 0.1338 | 1e-10 | 30 | | 1.4839 | 0.1341 | 1.5013 | 0.1338 | 1e-10 | 31 | | 1.4784 | 0.1318 | 1.5013 | 0.1338 | 1e-10 | 32 | | 1.4766 | 0.1412 | 1.5013 | 0.1338 | 1e-10 | 33 | | 1.4740 | 0.1365 | 1.5012 | 0.1338 | 1e-10 | 34 | | 1.4745 | 0.1529 | 1.5012 | 0.1338 | 1e-10 | 35 | | 1.4722 | 0.1412 | 1.5012 | 0.1338 | 1e-10 | 36 | | 1.4701 | 0.1506 | 1.5012 | 0.1338 | 1e-10 | 37 | | 1.4725 | 0.1388 | 1.5012 | 0.1338 | 1e-10 | 38 | | 1.4761 | 0.1459 | 1.5012 | 0.1338 | 1e-10 | 39 | | 1.4825 | 0.1553 | 1.5012 | 0.1338 | 1e-10 | 40 | | 1.4782 | 0.1412 | 1.5012 | 0.1338 | 1e-10 | 41 | | 1.4786 | 0.1200 | 1.5012 | 0.1338 | 1e-10 | 42 | | 1.4709 | 0.1576 | 1.5012 | 0.1338 | 1e-10 | 43 | | 1.4707 | 0.1318 | 1.5012 | 0.1338 | 1e-10 | 44 | | 
1.4714 | 0.1435 | 1.5012 | 0.1338 | 1e-10 | 45 | | 1.4729 | 0.1365 | 1.5012 | 0.1338 | 1e-10 | 46 | | 1.4760 | 0.1694 | 1.5012 | 0.1338 | 1e-10 | 47 | | 1.4787 | 0.1553 | 1.5012 | 0.1338 | 1e-10 | 48 | | 1.4707 | 0.1365 | 1.5012 | 0.1338 | 1e-10 | 49 | | 1.4767 | 0.1506 | 1.5012 | 0.1338 | 1e-10 | 50 | | 1.4749 | 0.1412 | 1.5012 | 0.1338 | 1e-10 | 51 | | 1.4737 | 0.1482 | 1.5012 | 0.1338 | 1e-10 | 52 | | 1.4764 | 0.1365 | 1.5012 | 0.1338 | 1e-10 | 53 | | 1.4764 | 0.1412 | 1.5011 | 0.1338 | 1e-10 | 54 | | 1.4808 | 0.1294 | 1.5011 | 0.1338 | 1e-10 | 55 | | 1.4694 | 0.1365 | 1.5011 | 0.1338 | 1e-10 | 56 | | 1.4714 | 0.1294 | 1.5011 | 0.1338 | 1e-10 | 57 | | 1.4766 | 0.1318 | 1.5011 | 0.1338 | 1e-10 | 58 | | 1.4801 | 0.1388 | 1.5011 | 0.1338 | 1e-10 | 59 | | 1.4771 | 0.1435 | 1.5011 | 0.1338 | 1e-10 | 60 | | 1.4740 | 0.1294 | 1.5011 | 0.1338 | 1e-10 | 61 | | 1.4817 | 0.1341 | 1.5011 | 0.1338 | 1e-10 | 62 | | 1.4728 | 0.1459 | 1.5011 | 0.1338 | 1e-10 | 63 | | 1.4791 | 0.1318 | 1.5011 | 0.1338 | 1e-10 | 64 | | 1.4733 | 0.1224 | 1.5011 | 0.1338 | 1e-10 | 65 | | 1.4678 | 0.1506 | 1.5011 | 0.1338 | 1e-10 | 66 | | 1.4789 | 0.1153 | 1.5011 | 0.1338 | 1e-10 | 67 | | 1.4655 | 0.1529 | 1.5011 | 0.1338 | 1e-10 | 68 | | 1.4698 | 0.1576 | 1.5011 | 0.1338 | 1e-10 | 69 | | 1.4755 | 0.1365 | 1.5011 | 0.1338 | 1e-10 | 70 | | 1.4754 | 0.1412 | 1.5011 | 0.1338 | 1e-10 | 71 | | 1.4732 | 0.1341 | 1.5011 | 0.1338 | 1e-10 | 72 | | 1.4762 | 0.1224 | 1.5010 | 0.1338 | 1e-10 | 73 | | 1.4642 | 0.1435 | 1.5010 | 0.1338 | 1e-10 | 74 | | 1.4726 | 0.1506 | 1.5010 | 0.1338 | 1e-10 | 75 | | 1.4810 | 0.1506 | 1.5010 | 0.1338 | 1e-10 | 76 | | 1.4749 | 0.1341 | 1.5010 | 0.1338 | 1e-10 | 77 | | 1.4734 | 0.1459 | 1.5010 | 0.1338 | 1e-10 | 78 | | 1.4740 | 0.1247 | 1.5010 | 0.1338 | 1e-10 | 79 | | 1.4721 | 0.1412 | 1.5010 | 0.1338 | 1e-10 | 80 | | 1.4767 | 0.1435 | 1.5010 | 0.1338 | 1e-10 | 81 | | 1.4748 | 0.1435 | 1.5010 | 0.1338 | 1e-10 | 82 | | 1.4848 | 0.1412 | 1.5010 | 0.1338 | 1e-10 | 83 | | 1.4755 | 0.1341 | 1.5010 | 0.1338 | 1e-10 | 84 | | 1.4705 | 0.1600 | 1.5010 | 0.1338 | 1e-10 | 85 | | 1.4707 | 0.1624 | 1.5010 | 0.1338 | 1e-10 | 86 | | 1.4748 | 0.1459 | 1.5010 | 0.1338 | 1e-10 | 87 | | 1.4759 | 0.1388 | 1.5010 | 0.1338 | 1e-10 | 88 | | 1.4722 | 0.1576 | 1.5010 | 0.1338 | 1e-10 | 89 | | 1.4764 | 0.1482 | 1.5010 | 0.1338 | 1e-10 | 90 | | 1.4711 | 0.1624 | 1.5010 | 0.1338 | 1e-10 | 91 | | 1.4734 | 0.1412 | 1.5009 | 0.1338 | 1e-10 | 92 | | 1.4772 | 0.1224 | 1.5009 | 0.1338 | 1e-10 | 93 | | 1.4660 | 0.1506 | 1.5009 | 0.1338 | 1e-10 | 94 | | 1.4771 | 0.1529 | 1.5009 | 0.1338 | 1e-10 | 95 | | 1.4698 | 0.1341 | 1.5009 | 0.1338 | 1e-10 | 96 | | 1.4763 | 0.1388 | 1.5009 | 0.1338 | 1e-10 | 97 | | 1.4708 | 0.1459 | 1.5009 | 0.1338 | 1e-10 | 98 | | 1.4774 | 0.1412 | 1.5009 | 0.1338 | 1e-10 | 99 | | 1.4648 | 0.1506 | 1.5009 | 0.1338 | 1e-10 | 100 | | 1.4799 | 0.1412 | 1.5009 | 0.1338 | 1e-10 | 101 | | 1.4750 | 0.1506 | 1.5009 | 0.1338 | 1e-10 | 102 | | 1.4779 | 0.1388 | 1.5009 | 0.1338 | 1e-10 | 103 | | 1.4774 | 0.1435 | 1.5009 | 0.1338 | 1e-10 | 104 | | 1.4736 | 0.1341 | 1.5009 | 0.1338 | 1e-10 | 105 | | 1.4702 | 0.1318 | 1.5009 | 0.1338 | 1e-10 | 106 | | 1.4827 | 0.1341 | 1.5009 | 0.1338 | 1e-10 | 107 | | 1.4770 | 0.1294 | 1.5009 | 0.1338 | 1e-10 | 108 | | 1.4783 | 0.1482 | 1.5009 | 0.1338 | 1e-10 | 109 | | 1.4721 | 0.1459 | 1.5009 | 0.1338 | 1e-10 | 110 | | 1.4739 | 0.1365 | 1.5008 | 0.1338 | 1e-10 | 111 | | 1.4722 | 0.1318 | 1.5008 | 0.1338 | 1e-10 | 112 | | 1.4762 | 0.1247 | 1.5008 | 0.1338 | 1e-10 | 113 | | 1.4682 | 0.1294 | 
1.5008 | 0.1338 | 1e-10 | 114 | | 1.4719 | 0.1388 | 1.5008 | 0.1338 | 1e-10 | 115 | | 1.4776 | 0.1529 | 1.5008 | 0.1338 | 1e-10 | 116 | | 1.4779 | 0.1412 | 1.5008 | 0.1338 | 1e-10 | 117 | | 1.4776 | 0.1200 | 1.5008 | 0.1338 | 1e-10 | 118 | | 1.4724 | 0.1200 | 1.5008 | 0.1338 | 1e-10 | 119 | | 1.4756 | 0.1341 | 1.5008 | 0.1338 | 1e-10 | 120 | | 1.4768 | 0.1459 | 1.5008 | 0.1338 | 1e-10 | 121 | | 1.4854 | 0.1294 | 1.5008 | 0.1338 | 1e-10 | 122 | | 1.4744 | 0.1388 | 1.5008 | 0.1338 | 1e-10 | 123 | | 1.4661 | 0.1459 | 1.5008 | 0.1338 | 1e-10 | 124 | | 1.4824 | 0.1412 | 1.5008 | 0.1338 | 1e-10 | 125 | | 1.4680 | 0.1576 | 1.5008 | 0.1338 | 1e-10 | 126 | | 1.4763 | 0.1365 | 1.5008 | 0.1338 | 1e-10 | 127 | | 1.4740 | 0.1435 | 1.5008 | 0.1338 | 1e-10 | 128 | | 1.4747 | 0.1553 | 1.5008 | 0.1338 | 1e-10 | 129 | | 1.4720 | 0.1365 | 1.5007 | 0.1338 | 1e-10 | 130 | | 1.4734 | 0.1294 | 1.5007 | 0.1338 | 1e-10 | 131 | | 1.4758 | 0.1365 | 1.5007 | 0.1338 | 1e-10 | 132 | | 1.4724 | 0.1365 | 1.5007 | 0.1338 | 1e-10 | 133 | | 1.4750 | 0.1341 | 1.5007 | 0.1338 | 1e-10 | 134 | | 1.4829 | 0.1412 | 1.5007 | 0.1338 | 1e-10 | 135 | | 1.4690 | 0.1365 | 1.5007 | 0.1338 | 1e-10 | 136 | | 1.4733 | 0.1506 | 1.5007 | 0.1338 | 1e-10 | 137 | | 1.4724 | 0.1459 | 1.5007 | 0.1338 | 1e-10 | 138 | | 1.4804 | 0.1271 | 1.5007 | 0.1338 | 1e-10 | 139 | | 1.4711 | 0.1482 | 1.5007 | 0.1338 | 1e-10 | 140 | | 1.4872 | 0.1318 | 1.5007 | 0.1338 | 1e-10 | 141 | | 1.4796 | 0.1341 | 1.5007 | 0.1338 | 1e-10 | 142 | | 1.4712 | 0.1576 | 1.5007 | 0.1338 | 1e-10 | 143 | | 1.4729 | 0.1435 | 1.5007 | 0.1338 | 1e-10 | 144 | | 1.4678 | 0.1624 | 1.5007 | 0.1338 | 1e-10 | 145 | | 1.4696 | 0.1553 | 1.5007 | 0.1338 | 1e-10 | 146 | | 1.4742 | 0.1412 | 1.5007 | 0.1338 | 1e-10 | 147 | | 1.4814 | 0.1365 | 1.5007 | 0.1338 | 1e-10 | 148 | | 1.4705 | 0.1224 | 1.5006 | 0.1338 | 1e-10 | 149 | | 1.4711 | 0.1176 | 1.5006 | 0.1338 | 1e-10 | 150 | | 1.4692 | 0.1459 | 1.5006 | 0.1338 | 1e-10 | 151 | | 1.4698 | 0.1529 | 1.5006 | 0.1338 | 1e-10 | 152 | | 1.4721 | 0.1459 | 1.5006 | 0.1338 | 1e-10 | 153 | | 1.4692 | 0.1482 | 1.5006 | 0.1338 | 1e-10 | 154 | | 1.4773 | 0.1341 | 1.5006 | 0.1338 | 1e-10 | 155 | | 1.4677 | 0.1553 | 1.5006 | 0.1338 | 1e-10 | 156 | | 1.4815 | 0.1271 | 1.5006 | 0.1338 | 1e-10 | 157 | | 1.4732 | 0.1271 | 1.5006 | 0.1338 | 1e-10 | 158 | | 1.4727 | 0.1529 | 1.5006 | 0.1338 | 1e-10 | 159 | | 1.4764 | 0.1482 | 1.5006 | 0.1338 | 1e-10 | 160 | | 1.4773 | 0.1412 | 1.5006 | 0.1338 | 1e-10 | 161 | | 1.4792 | 0.1435 | 1.5006 | 0.1338 | 1e-10 | 162 | | 1.4733 | 0.1529 | 1.5006 | 0.1338 | 1e-10 | 163 | | 1.4781 | 0.1435 | 1.5006 | 0.1338 | 1e-10 | 164 | | 1.4689 | 0.1318 | 1.5006 | 0.1338 | 1e-10 | 165 | | 1.4795 | 0.1459 | 1.5006 | 0.1338 | 1e-10 | 166 | | 1.4766 | 0.1294 | 1.5006 | 0.1338 | 1e-10 | 167 | | 1.4728 | 0.1459 | 1.5005 | 0.1338 | 1e-10 | 168 | | 1.4664 | 0.1435 | 1.5005 | 0.1338 | 1e-10 | 169 | | 1.4710 | 0.1388 | 1.5005 | 0.1338 | 1e-10 | 170 | | 1.4758 | 0.1435 | 1.5005 | 0.1338 | 1e-10 | 171 | | 1.4760 | 0.1412 | 1.5005 | 0.1338 | 1e-10 | 172 | | 1.4768 | 0.1388 | 1.5005 | 0.1338 | 1e-10 | 173 | | 1.4749 | 0.1459 | 1.5005 | 0.1338 | 1e-10 | 174 | | 1.4795 | 0.1506 | 1.5005 | 0.1338 | 1e-10 | 175 | | 1.4702 | 0.1459 | 1.5005 | 0.1338 | 1e-10 | 176 | | 1.4788 | 0.1271 | 1.5005 | 0.1338 | 1e-10 | 177 | | 1.4753 | 0.1435 | 1.5005 | 0.1338 | 1e-10 | 178 | | 1.4750 | 0.1388 | 1.5005 | 0.1338 | 1e-10 | 179 | | 1.4799 | 0.1459 | 1.5005 | 0.1338 | 1e-10 | 180 | | 1.4768 | 0.1365 | 1.5005 | 0.1338 | 1e-10 | 181 | | 1.4780 | 0.1459 | 1.5005 | 0.1338 | 
1e-10 | 182 | | 1.4745 | 0.1224 | 1.5005 | 0.1338 | 1e-10 | 183 | | 1.4618 | 0.1624 | 1.5005 | 0.1338 | 1e-10 | 184 | | 1.4775 | 0.1553 | 1.5005 | 0.1338 | 1e-10 | 185 | | 1.4711 | 0.1435 | 1.5005 | 0.1338 | 1e-10 | 186 | | 1.4802 | 0.1388 | 1.5004 | 0.1338 | 1e-10 | 187 | | 1.4714 | 0.1529 | 1.5004 | 0.1338 | 1e-10 | 188 | | 1.4707 | 0.1482 | 1.5004 | 0.1338 | 1e-10 | 189 | | 1.4712 | 0.1647 | 1.5004 | 0.1338 | 1e-10 | 190 | | 1.4709 | 0.1435 | 1.5004 | 0.1338 | 1e-10 | 191 | | 1.4741 | 0.1459 | 1.5004 | 0.1338 | 1e-10 | 192 | | 1.4682 | 0.1553 | 1.5004 | 0.1338 | 1e-10 | 193 | | 1.4768 | 0.1224 | 1.5004 | 0.1338 | 1e-10 | 194 | | 1.4868 | 0.1388 | 1.5004 | 0.1338 | 1e-10 | 195 | | 1.4736 | 0.1600 | 1.5004 | 0.1338 | 1e-10 | 196 | | 1.4784 | 0.1388 | 1.5004 | 0.1338 | 1e-10 | 197 | | 1.4752 | 0.1365 | 1.5004 | 0.1338 | 1e-10 | 198 | | 1.4790 | 0.1506 | 1.5004 | 0.1338 | 1e-10 | 199 | | 1.4696 | 0.1412 | 1.5004 | 0.1338 | 1e-10 | 200 | | 1.4771 | 0.1435 | 1.5004 | 0.1338 | 1e-10 | 201 | | 1.4723 | 0.1412 | 1.5004 | 0.1338 | 1e-10 | 202 | | 1.4742 | 0.1294 | 1.5004 | 0.1338 | 1e-10 | 203 | | 1.4713 | 0.1529 | 1.5004 | 0.1338 | 1e-10 | 204 | | 1.4752 | 0.1412 | 1.5004 | 0.1338 | 1e-10 | 205 | | 1.4728 | 0.1365 | 1.5003 | 0.1338 | 1e-10 | 206 | | 1.4809 | 0.1388 | 1.5003 | 0.1338 | 1e-10 | 207 | | 1.4772 | 0.1388 | 1.5003 | 0.1338 | 1e-10 | 208 | | 1.4759 | 0.1506 | 1.5003 | 0.1338 | 1e-10 | 209 | | 1.4769 | 0.1482 | 1.5003 | 0.1338 | 1e-10 | 210 | | 1.4686 | 0.1388 | 1.5003 | 0.1338 | 1e-10 | 211 | | 1.4775 | 0.1506 | 1.5003 | 0.1338 | 1e-10 | 212 | | 1.4659 | 0.1412 | 1.5003 | 0.1338 | 1e-10 | 213 | | 1.4766 | 0.1176 | 1.5003 | 0.1338 | 1e-10 | 214 | | 1.4770 | 0.1341 | 1.5003 | 0.1338 | 1e-10 | 215 | | 1.4572 | 0.1600 | 1.5003 | 0.1338 | 1e-10 | 216 | | 1.4677 | 0.1318 | 1.5003 | 0.1338 | 1e-10 | 217 | | 1.4816 | 0.1224 | 1.5003 | 0.1338 | 1e-10 | 218 | | 1.4748 | 0.1600 | 1.5003 | 0.1338 | 1e-10 | 219 | | 1.4753 | 0.1529 | 1.5003 | 0.1338 | 1e-10 | 220 | | 1.4744 | 0.1247 | 1.5003 | 0.1338 | 1e-10 | 221 | | 1.4757 | 0.1459 | 1.5003 | 0.1338 | 1e-10 | 222 | | 1.4777 | 0.1365 | 1.5003 | 0.1338 | 1e-10 | 223 | | 1.4705 | 0.1459 | 1.5003 | 0.1338 | 1e-10 | 224 | | 1.4697 | 0.1506 | 1.5003 | 0.1338 | 1e-10 | 225 | | 1.4714 | 0.1341 | 1.5002 | 0.1338 | 1e-10 | 226 | | 1.4714 | 0.1365 | 1.5002 | 0.1338 | 1e-10 | 227 | | 1.4778 | 0.1459 | 1.5002 | 0.1338 | 1e-10 | 228 | | 1.4764 | 0.1506 | 1.5002 | 0.1338 | 1e-10 | 229 | | 1.4687 | 0.1741 | 1.5002 | 0.1338 | 1e-10 | 230 | | 1.4731 | 0.1506 | 1.5002 | 0.1338 | 1e-10 | 231 | | 1.4747 | 0.1341 | 1.5002 | 0.1338 | 1e-10 | 232 | | 1.4709 | 0.1412 | 1.5002 | 0.1338 | 1e-10 | 233 | | 1.4730 | 0.1553 | 1.5002 | 0.1338 | 1e-10 | 234 | | 1.4749 | 0.1388 | 1.5002 | 0.1338 | 1e-10 | 235 | | 1.4734 | 0.1271 | 1.5002 | 0.1338 | 1e-10 | 236 | | 1.4658 | 0.1506 | 1.5002 | 0.1338 | 1e-10 | 237 | | 1.4662 | 0.1576 | 1.5002 | 0.1338 | 1e-10 | 238 | | 1.4771 | 0.1459 | 1.5002 | 0.1338 | 1e-10 | 239 | | 1.4793 | 0.1365 | 1.5002 | 0.1338 | 1e-10 | 240 | | 1.4702 | 0.1318 | 1.5002 | 0.1338 | 1e-10 | 241 | | 1.4737 | 0.1341 | 1.5002 | 0.1338 | 1e-10 | 242 | | 1.4737 | 0.1459 | 1.5002 | 0.1338 | 1e-10 | 243 | | 1.4799 | 0.1435 | 1.5002 | 0.1338 | 1e-10 | 244 | | 1.4821 | 0.1435 | 1.5001 | 0.1338 | 1e-10 | 245 | | 1.4673 | 0.1529 | 1.5001 | 0.1338 | 1e-10 | 246 | | 1.4720 | 0.1482 | 1.5001 | 0.1338 | 1e-10 | 247 | | 1.4715 | 0.1600 | 1.5001 | 0.1338 | 1e-10 | 248 | | 1.4750 | 0.1647 | 1.5001 | 0.1338 | 1e-10 | 249 | | 1.4735 | 0.1341 | 1.5001 | 0.1338 | 1e-10 | 250 | | 
1.4787 | 0.1341 | 1.5001 | 0.1338 | 1e-10 | 251 | | 1.4659 | 0.1600 | 1.5001 | 0.1338 | 1e-10 | 252 | | 1.4787 | 0.1529 | 1.5001 | 0.1338 | 1e-10 | 253 | | 1.4787 | 0.1341 | 1.5001 | 0.1338 | 1e-10 | 254 | | 1.4796 | 0.1435 | 1.5001 | 0.1338 | 1e-10 | 255 | | 1.4739 | 0.1506 | 1.5001 | 0.1338 | 1e-10 | 256 | | 1.4817 | 0.1318 | 1.5001 | 0.1338 | 1e-10 | 257 | | 1.4796 | 0.1412 | 1.5001 | 0.1338 | 1e-10 | 258 | | 1.4780 | 0.1341 | 1.5001 | 0.1338 | 1e-10 | 259 | | 1.4737 | 0.1341 | 1.5001 | 0.1338 | 1e-10 | 260 | | 1.4777 | 0.1412 | 1.5001 | 0.1338 | 1e-10 | 261 | | 1.4709 | 0.1459 | 1.5001 | 0.1338 | 1e-10 | 262 | | 1.4680 | 0.1576 | 1.5001 | 0.1338 | 1e-10 | 263 | | 1.4760 | 0.1506 | 1.5000 | 0.1338 | 1e-10 | 264 | | 1.4743 | 0.1482 | 1.5000 | 0.1338 | 1e-10 | 265 | | 1.4709 | 0.1553 | 1.5000 | 0.1338 | 1e-10 | 266 | | 1.4787 | 0.1294 | 1.5000 | 0.1338 | 1e-10 | 267 | | 1.4727 | 0.1482 | 1.5000 | 0.1338 | 1e-10 | 268 | | 1.4776 | 0.1553 | 1.5000 | 0.1338 | 1e-10 | 269 | | 1.4804 | 0.1247 | 1.5000 | 0.1338 | 1e-10 | 270 | | 1.4682 | 0.1529 | 1.5000 | 0.1338 | 1e-10 | 271 | | 1.4731 | 0.1435 | 1.5000 | 0.1338 | 1e-10 | 272 | | 1.4719 | 0.1482 | 1.5000 | 0.1338 | 1e-10 | 273 | | 1.4773 | 0.1506 | 1.5000 | 0.1338 | 1e-10 | 274 | | 1.4780 | 0.1294 | 1.5000 | 0.1338 | 1e-10 | 275 | | 1.4728 | 0.1506 | 1.5000 | 0.1338 | 1e-10 | 276 | | 1.4748 | 0.1459 | 1.5000 | 0.1338 | 1e-10 | 277 | | 1.4667 | 0.1341 | 1.5000 | 0.1338 | 1e-10 | 278 | | 1.4725 | 0.1459 | 1.5000 | 0.1338 | 1e-10 | 279 | | 1.4774 | 0.1388 | 1.5000 | 0.1338 | 1e-10 | 280 | | 1.4764 | 0.1529 | 1.5000 | 0.1338 | 1e-10 | 281 | | 1.4725 | 0.1388 | 1.5000 | 0.1338 | 1e-10 | 282 | | 1.4734 | 0.1435 | 1.4999 | 0.1338 | 1e-10 | 283 | | 1.4718 | 0.1506 | 1.4999 | 0.1338 | 1e-10 | 284 | | 1.4674 | 0.1482 | 1.4999 | 0.1338 | 1e-10 | 285 | | 1.4762 | 0.1435 | 1.4999 | 0.1338 | 1e-10 | 286 | | 1.4735 | 0.1482 | 1.4999 | 0.1338 | 1e-10 | 287 | | 1.4790 | 0.1294 | 1.4999 | 0.1338 | 1e-10 | 288 | | 1.4777 | 0.1388 | 1.4999 | 0.1338 | 1e-10 | 289 | | 1.4793 | 0.1576 | 1.4999 | 0.1338 | 1e-10 | 290 | | 1.4729 | 0.1435 | 1.4999 | 0.1338 | 1e-10 | 291 | | 1.4742 | 0.1506 | 1.4999 | 0.1338 | 1e-10 | 292 | | 1.4775 | 0.1341 | 1.4999 | 0.1338 | 1e-10 | 293 | | 1.4688 | 0.1482 | 1.4999 | 0.1338 | 1e-10 | 294 | | 1.4782 | 0.1247 | 1.4999 | 0.1338 | 1e-10 | 295 | | 1.4680 | 0.1482 | 1.4999 | 0.1338 | 1e-10 | 296 | | 1.4749 | 0.1365 | 1.4999 | 0.1338 | 1e-10 | 297 | | 1.4814 | 0.1176 | 1.4999 | 0.1338 | 1e-10 | 298 | | 1.4698 | 0.1388 | 1.4999 | 0.1338 | 1e-10 | 299 | | 1.4724 | 0.1529 | 1.4999 | 0.1338 | 1e-10 | 300 | | 1.4753 | 0.1459 | 1.4999 | 0.1338 | 1e-10 | 301 | | 1.4790 | 0.1341 | 1.4998 | 0.1338 | 1e-10 | 302 | | 1.4685 | 0.1529 | 1.4998 | 0.1338 | 1e-10 | 303 | | 1.4850 | 0.1341 | 1.4998 | 0.1338 | 1e-10 | 304 | | 1.4755 | 0.1435 | 1.4998 | 0.1338 | 1e-10 | 305 | | 1.4781 | 0.1341 | 1.4998 | 0.1338 | 1e-10 | 306 | | 1.4800 | 0.1341 | 1.4998 | 0.1338 | 1e-10 | 307 | | 1.4749 | 0.1529 | 1.4998 | 0.1338 | 1e-10 | 308 | | 1.4819 | 0.1271 | 1.4998 | 0.1338 | 1e-10 | 309 | | 1.4702 | 0.1529 | 1.4998 | 0.1338 | 1e-10 | 310 | | 1.4758 | 0.1459 | 1.4998 | 0.1338 | 1e-10 | 311 | | 1.4703 | 0.1529 | 1.4998 | 0.1338 | 1e-10 | 312 | | 1.4768 | 0.1365 | 1.4998 | 0.1338 | 1e-10 | 313 | | 1.4741 | 0.1294 | 1.4998 | 0.1338 | 1e-10 | 314 | | 1.4702 | 0.1506 | 1.4998 | 0.1338 | 1e-10 | 315 | | 1.4744 | 0.1647 | 1.4998 | 0.1338 | 1e-10 | 316 | | 1.4771 | 0.1482 | 1.4998 | 0.1338 | 1e-10 | 317 | | 1.4711 | 0.1506 | 1.4998 | 0.1338 | 1e-10 | 318 | | 1.4679 | 0.1506 | 
1.4998 | 0.1338 | 1e-10 | 319 | | 1.4726 | 0.1459 | 1.4998 | 0.1338 | 1e-10 | 320 | | 1.4682 | 0.1435 | 1.4997 | 0.1338 | 1e-10 | 321 | | 1.4750 | 0.1506 | 1.4997 | 0.1338 | 1e-10 | 322 | | 1.4756 | 0.1482 | 1.4997 | 0.1338 | 1e-10 | 323 | | 1.4791 | 0.1365 | 1.4997 | 0.1338 | 1e-10 | 324 | | 1.4794 | 0.1200 | 1.4997 | 0.1338 | 1e-10 | 325 | | 1.4813 | 0.1435 | 1.4997 | 0.1338 | 1e-10 | 326 | | 1.4604 | 0.1318 | 1.4997 | 0.1338 | 1e-10 | 327 | | 1.4815 | 0.1247 | 1.4997 | 0.1338 | 1e-10 | 328 | | 1.4750 | 0.1412 | 1.4997 | 0.1338 | 1e-10 | 329 | | 1.4671 | 0.1459 | 1.4997 | 0.1338 | 1e-10 | 330 | | 1.4749 | 0.1576 | 1.4997 | 0.1338 | 1e-10 | 331 | | 1.4836 | 0.1341 | 1.4997 | 0.1338 | 1e-10 | 332 | | 1.4839 | 0.1624 | 1.4997 | 0.1338 | 1e-10 | 333 | | 1.4660 | 0.1412 | 1.4997 | 0.1338 | 1e-10 | 334 | | 1.4708 | 0.1318 | 1.4997 | 0.1338 | 1e-10 | 335 | | 1.4755 | 0.1271 | 1.4997 | 0.1338 | 1e-10 | 336 | | 1.4823 | 0.1318 | 1.4997 | 0.1338 | 1e-10 | 337 | | 1.4730 | 0.1318 | 1.4997 | 0.1338 | 1e-10 | 338 | | 1.4785 | 0.1459 | 1.4997 | 0.1338 | 1e-10 | 339 | | 1.4720 | 0.1412 | 1.4996 | 0.1338 | 1e-10 | 340 | | 1.4759 | 0.1459 | 1.4996 | 0.1338 | 1e-10 | 341 | | 1.4755 | 0.1482 | 1.4996 | 0.1338 | 1e-10 | 342 | | 1.4756 | 0.1365 | 1.4996 | 0.1338 | 1e-10 | 343 | | 1.4720 | 0.1459 | 1.4996 | 0.1338 | 1e-10 | 344 | | 1.4835 | 0.1388 | 1.4996 | 0.1338 | 1e-10 | 345 | | 1.4722 | 0.1412 | 1.4996 | 0.1338 | 1e-10 | 346 | | 1.4729 | 0.1271 | 1.4996 | 0.1338 | 9.9999994e-11 | 347 | | 1.4838 | 0.1271 | 1.4996 | 0.1338 | 9.999999e-11 | 348 | | 1.4722 | 0.1318 | 1.4996 | 0.1338 | 9.999998e-11 | 349 | | 1.4709 | 0.1459 | 1.4996 | 0.1338 | 9.9999974e-11 | 350 | | 1.4729 | 0.1388 | 1.4996 | 0.1338 | 9.999997e-11 | 351 | | 1.4751 | 0.1459 | 1.4996 | 0.1338 | 9.999996e-11 | 352 | | 1.4627 | 0.1553 | 1.4996 | 0.1338 | 9.999995e-11 | 353 | | 1.4719 | 0.1459 | 1.4996 | 0.1338 | 9.9999946e-11 | 354 | | 1.4696 | 0.1341 | 1.4996 | 0.1338 | 9.999994e-11 | 355 | | 1.4782 | 0.1435 | 1.4996 | 0.1338 | 9.999993e-11 | 356 | | 1.4692 | 0.1459 | 1.4996 | 0.1338 | 9.9999925e-11 | 357 | | 1.4685 | 0.1435 | 1.4996 | 0.1338 | 9.999992e-11 | 358 | | 1.4787 | 0.1459 | 1.4996 | 0.1338 | 9.999991e-11 | 359 | | 1.4783 | 0.1694 | 1.4995 | 0.1338 | 9.9999904e-11 | 360 | | 1.4746 | 0.1553 | 1.4995 | 0.1338 | 9.99999e-11 | 361 | | 1.4805 | 0.1388 | 1.4995 | 0.1338 | 9.999989e-11 | 362 | | 1.4651 | 0.1365 | 1.4995 | 0.1338 | 9.999988e-11 | 363 | | 1.4713 | 0.1435 | 1.4995 | 0.1338 | 9.9999876e-11 | 364 | | 1.4753 | 0.1341 | 1.4995 | 0.1338 | 9.999987e-11 | 365 | | 1.4764 | 0.1529 | 1.4995 | 0.1338 | 9.999986e-11 | 366 | | 1.4719 | 0.1412 | 1.4995 | 0.1338 | 9.9999856e-11 | 367 | | 1.4746 | 0.1412 | 1.4995 | 0.1338 | 9.999985e-11 | 368 | | 1.4736 | 0.1341 | 1.4995 | 0.1338 | 9.999984e-11 | 369 | | 1.4636 | 0.1553 | 1.4995 | 0.1338 | 9.9999835e-11 | 370 | | 1.4680 | 0.1576 | 1.4995 | 0.1338 | 9.999983e-11 | 371 | | 1.4725 | 0.1341 | 1.4995 | 0.1338 | 9.999982e-11 | 372 | | 1.4738 | 0.1388 | 1.4995 | 0.1338 | 9.9999814e-11 | 373 | | 1.4777 | 0.1506 | 1.4995 | 0.1338 | 9.999981e-11 | 374 | | 1.4710 | 0.1671 | 1.4995 | 0.1338 | 9.99998e-11 | 375 | | 1.4726 | 0.1506 | 1.4995 | 0.1338 | 9.999979e-11 | 376 | | 1.4744 | 0.1365 | 1.4995 | 0.1338 | 9.9999786e-11 | 377 | | 1.4731 | 0.1529 | 1.4995 | 0.1338 | 9.999978e-11 | 378 | | 1.4713 | 0.1506 | 1.4994 | 0.1338 | 9.999977e-11 | 379 | | 1.4790 | 0.1412 | 1.4994 | 0.1338 | 9.9999765e-11 | 380 | | 1.4689 | 0.1388 | 1.4994 | 0.1338 | 9.999976e-11 | 381 | | 1.4708 | 0.1482 | 1.4994 | 0.1338 | 
9.999975e-11 | 382 | | 1.4705 | 0.1529 | 1.4994 | 0.1338 | 9.9999745e-11 | 383 | | 1.4658 | 0.1506 | 1.4994 | 0.1338 | 9.999974e-11 | 384 | | 1.4758 | 0.1200 | 1.4994 | 0.1338 | 9.999973e-11 | 385 | | 1.4812 | 0.1365 | 1.4994 | 0.1338 | 9.9999724e-11 | 386 | | 1.4773 | 0.1694 | 1.4994 | 0.1338 | 9.999972e-11 | 387 | | 1.4729 | 0.1506 | 1.4994 | 0.1338 | 9.999971e-11 | 388 | | 1.4729 | 0.1459 | 1.4994 | 0.1338 | 9.99997e-11 | 389 | | 1.4796 | 0.1365 | 1.4994 | 0.1338 | 9.9999696e-11 | 390 | | 1.4763 | 0.1294 | 1.4994 | 0.1338 | 9.999969e-11 | 391 | | 1.4733 | 0.1529 | 1.4994 | 0.1338 | 9.999968e-11 | 392 | | 1.4726 | 0.1435 | 1.4994 | 0.1338 | 9.9999675e-11 | 393 | | 1.4699 | 0.1318 | 1.4994 | 0.1338 | 9.999967e-11 | 394 | | 1.4724 | 0.1318 | 1.4994 | 0.1338 | 9.999966e-11 | 395 | | 1.4767 | 0.1388 | 1.4994 | 0.1338 | 9.9999654e-11 | 396 | | 1.4733 | 0.1341 | 1.4994 | 0.1338 | 9.999965e-11 | 397 | | 1.4769 | 0.1459 | 1.4993 | 0.1338 | 9.999964e-11 | 398 | | 1.4744 | 0.1482 | 1.4993 | 0.1338 | 9.9999634e-11 | 399 | | 1.4739 | 0.1435 | 1.4993 | 0.1338 | 9.999963e-11 | 400 | | 1.4746 | 0.1482 | 1.4993 | 0.1338 | 9.999962e-11 | 401 | | 1.4725 | 0.1412 | 1.4993 | 0.1338 | 9.999961e-11 | 402 | | 1.4665 | 0.1459 | 1.4993 | 0.1338 | 9.9999606e-11 | 403 | | 1.4791 | 0.1506 | 1.4993 | 0.1338 | 9.99996e-11 | 404 | | 1.4747 | 0.1506 | 1.4993 | 0.1338 | 9.999959e-11 | 405 | | 1.4770 | 0.1247 | 1.4993 | 0.1338 | 9.9999585e-11 | 406 | | 1.4773 | 0.1529 | 1.4993 | 0.1338 | 9.999958e-11 | 407 | | 1.4832 | 0.1318 | 1.4993 | 0.1338 | 9.999957e-11 | 408 | | 1.4728 | 0.1271 | 1.4993 | 0.1338 | 9.9999564e-11 | 409 | | 1.4714 | 0.1553 | 1.4993 | 0.1338 | 9.999956e-11 | 410 | | 1.4758 | 0.1365 | 1.4993 | 0.1338 | 9.999955e-11 | 411 | | 1.4740 | 0.1459 | 1.4993 | 0.1338 | 9.999954e-11 | 412 | | 1.4737 | 0.1365 | 1.4993 | 0.1338 | 9.9999536e-11 | 413 | | 1.4786 | 0.1529 | 1.4993 | 0.1338 | 9.999953e-11 | 414 | | 1.4694 | 0.1459 | 1.4993 | 0.1338 | 9.999952e-11 | 415 | | 1.4720 | 0.1459 | 1.4993 | 0.1338 | 9.9999516e-11 | 416 | | 1.4761 | 0.1294 | 1.4992 | 0.1338 | 9.999951e-11 | 417 | | 1.4761 | 0.1318 | 1.4992 | 0.1338 | 9.99995e-11 | 418 | | 1.4724 | 0.1459 | 1.4992 | 0.1338 | 9.9999495e-11 | 419 | | 1.4760 | 0.1459 | 1.4992 | 0.1338 | 9.999949e-11 | 420 | | 1.4735 | 0.1412 | 1.4992 | 0.1338 | 9.999948e-11 | 421 | | 1.4752 | 0.1318 | 1.4992 | 0.1338 | 9.9999474e-11 | 422 | | 1.4748 | 0.1600 | 1.4992 | 0.1338 | 9.999947e-11 | 423 | | 1.4777 | 0.1435 | 1.4992 | 0.1338 | 9.999946e-11 | 424 | | 1.4714 | 0.1482 | 1.4992 | 0.1338 | 9.999945e-11 | 425 | | 1.4729 | 0.1506 | 1.4992 | 0.1338 | 9.9999446e-11 | 426 | | 1.4768 | 0.1294 | 1.4992 | 0.1338 | 9.999944e-11 | 427 | | 1.4718 | 0.1482 | 1.4992 | 0.1338 | 9.999943e-11 | 428 | | 1.4783 | 0.1271 | 1.4992 | 0.1338 | 9.9999425e-11 | 429 | | 1.4735 | 0.1553 | 1.4992 | 0.1338 | 9.999942e-11 | 430 | | 1.4762 | 0.1388 | 1.4992 | 0.1338 | 9.999941e-11 | 431 | | 1.4698 | 0.1388 | 1.4992 | 0.1338 | 9.9999405e-11 | 432 | | 1.4655 | 0.1529 | 1.4992 | 0.1338 | 9.99994e-11 | 433 | | 1.4725 | 0.1412 | 1.4992 | 0.1338 | 9.999939e-11 | 434 | | 1.4738 | 0.1506 | 1.4992 | 0.1338 | 9.9999384e-11 | 435 | | 1.4737 | 0.1506 | 1.4991 | 0.1338 | 9.999938e-11 | 436 | | 1.4704 | 0.1435 | 1.4991 | 0.1338 | 9.999937e-11 | 437 | | 1.4824 | 0.1271 | 1.4991 | 0.1338 | 9.999936e-11 | 438 | | 1.4713 | 0.1341 | 1.4991 | 0.1338 | 9.9999356e-11 | 439 | | 1.4707 | 0.1412 | 1.4991 | 0.1338 | 9.999935e-11 | 440 | | 1.4721 | 0.1482 | 1.4991 | 0.1338 | 9.999934e-11 | 441 | | 1.4667 | 0.1435 | 1.4991 | 0.1338 | 
9.9999335e-11 | 442 | | 1.4793 | 0.1365 | 1.4991 | 0.1338 | 9.999933e-11 | 443 | | 1.4746 | 0.1412 | 1.4991 | 0.1338 | 9.999932e-11 | 444 | | 1.4637 | 0.1506 | 1.4991 | 0.1338 | 9.9999314e-11 | 445 | | 1.4701 | 0.1529 | 1.4991 | 0.1338 | 9.999931e-11 | 446 | | 1.4666 | 0.1506 | 1.4991 | 0.1338 | 9.99993e-11 | 447 | | 1.4796 | 0.1318 | 1.4991 | 0.1338 | 9.9999294e-11 | 448 | | 1.4729 | 0.1412 | 1.4991 | 0.1338 | 9.999929e-11 | 449 | | 1.4725 | 0.1482 | 1.4991 | 0.1338 | 9.999928e-11 | 450 | | 1.4731 | 0.1412 | 1.4991 | 0.1338 | 9.999927e-11 | 451 | | 1.4723 | 0.1506 | 1.4991 | 0.1338 | 9.9999266e-11 | 452 | | 1.4744 | 0.1341 | 1.4991 | 0.1338 | 9.999926e-11 | 453 | | 1.4746 | 0.1459 | 1.4991 | 0.1338 | 9.999925e-11 | 454 | | 1.4702 | 0.1318 | 1.4990 | 0.1338 | 9.9999245e-11 | 455 | | 1.4721 | 0.1459 | 1.4990 | 0.1338 | 9.999924e-11 | 456 | | 1.4824 | 0.1459 | 1.4990 | 0.1338 | 9.999923e-11 | 457 | | 1.4732 | 0.1459 | 1.4990 | 0.1338 | 9.9999224e-11 | 458 | | 1.4740 | 0.1482 | 1.4990 | 0.1338 | 9.999922e-11 | 459 | | 1.4729 | 0.1482 | 1.4990 | 0.1338 | 9.999921e-11 | 460 | | 1.4746 | 0.1576 | 1.4990 | 0.1338 | 9.99992e-11 | 461 | | 1.4771 | 0.1365 | 1.4990 | 0.1338 | 9.9999196e-11 | 462 | | 1.4809 | 0.1412 | 1.4990 | 0.1338 | 9.999919e-11 | 463 | | 1.4774 | 0.1365 | 1.4990 | 0.1338 | 9.999918e-11 | 464 | | 1.4741 | 0.1459 | 1.4990 | 0.1338 | 9.9999176e-11 | 465 | | 1.4811 | 0.1388 | 1.4990 | 0.1338 | 9.999917e-11 | 466 | | 1.4776 | 0.1459 | 1.4990 | 0.1338 | 9.999916e-11 | 467 | | 1.4663 | 0.1506 | 1.4990 | 0.1338 | 9.9999155e-11 | 468 | | 1.4666 | 0.1482 | 1.4990 | 0.1338 | 9.999915e-11 | 469 | | 1.4814 | 0.1294 | 1.4990 | 0.1338 | 9.999914e-11 | 470 | | 1.4720 | 0.1271 | 1.4990 | 0.1338 | 9.9999134e-11 | 471 | | 1.4668 | 0.1247 | 1.4990 | 0.1338 | 9.999913e-11 | 472 | | 1.4647 | 0.1671 | 1.4990 | 0.1338 | 9.999912e-11 | 473 | | 1.4674 | 0.1624 | 1.4989 | 0.1338 | 9.999911e-11 | 474 | | 1.4724 | 0.1553 | 1.4989 | 0.1338 | 9.9999106e-11 | 475 | | 1.4711 | 0.1435 | 1.4989 | 0.1338 | 9.99991e-11 | 476 | | 1.4685 | 0.1482 | 1.4989 | 0.1338 | 9.999909e-11 | 477 | | 1.4784 | 0.1388 | 1.4989 | 0.1338 | 9.9999085e-11 | 478 | | 1.4728 | 0.1341 | 1.4989 | 0.1338 | 9.999908e-11 | 479 | | 1.4708 | 0.1412 | 1.4989 | 0.1338 | 9.999907e-11 | 480 | | 1.4691 | 0.1553 | 1.4989 | 0.1338 | 9.9999065e-11 | 481 | | 1.4713 | 0.1506 | 1.4989 | 0.1338 | 9.999906e-11 | 482 | | 1.4732 | 0.1341 | 1.4989 | 0.1338 | 9.999905e-11 | 483 | | 1.4727 | 0.1271 | 1.4989 | 0.1338 | 9.9999044e-11 | 484 | | 1.4751 | 0.1435 | 1.4989 | 0.1338 | 9.999904e-11 | 485 | | 1.4721 | 0.1671 | 1.4989 | 0.1338 | 9.999903e-11 | 486 | | 1.4662 | 0.1341 | 1.4989 | 0.1338 | 9.999902e-11 | 487 | | 1.4711 | 0.1459 | 1.4989 | 0.1338 | 9.9999016e-11 | 488 | | 1.4743 | 0.1529 | 1.4989 | 0.1338 | 9.999901e-11 | 489 | | 1.4648 | 0.1529 | 1.4989 | 0.1338 | 9.9999e-11 | 490 | | 1.4762 | 0.1435 | 1.4989 | 0.1338 | 9.9998995e-11 | 491 | | 1.4683 | 0.1318 | 1.4989 | 0.1338 | 9.999899e-11 | 492 | | 1.4702 | 0.1624 | 1.4989 | 0.1338 | 9.999898e-11 | 493 | | 1.4717 | 0.1482 | 1.4988 | 0.1338 | 9.9998974e-11 | 494 | | 1.4753 | 0.1435 | 1.4988 | 0.1338 | 9.999897e-11 | 495 | | 1.4775 | 0.1341 | 1.4988 | 0.1338 | 9.999896e-11 | 496 | | 1.4755 | 0.1624 | 1.4988 | 0.1338 | 9.9998954e-11 | 497 | | 1.4748 | 0.1224 | 1.4988 | 0.1338 | 9.999895e-11 | 498 | | 1.4704 | 0.1365 | 1.4988 | 0.1338 | 9.999894e-11 | 499 | | 1.4710 | 0.1341 | 1.4988 | 0.1338 | 9.999893e-11 | 500 | | 1.4720 | 0.1412 | 1.4988 | 0.1338 | 9.9998926e-11 | 501 | | 1.4743 | 0.1600 | 1.4988 | 0.1338 | 
9.999892e-11 | 502 | | 1.4698 | 0.1459 | 1.4988 | 0.1338 | 9.999891e-11 | 503 | | 1.4730 | 0.1506 | 1.4988 | 0.1338 | 9.9998905e-11 | 504 | | 1.4699 | 0.1318 | 1.4988 | 0.1338 | 9.99989e-11 | 505 | | 1.4714 | 0.1459 | 1.4988 | 0.1338 | 9.999889e-11 | 506 | | 1.4741 | 0.1553 | 1.4988 | 0.1338 | 9.9998884e-11 | 507 | | 1.4878 | 0.1318 | 1.4988 | 0.1338 | 9.999888e-11 | 508 | | 1.4759 | 0.1365 | 1.4988 | 0.1338 | 9.999887e-11 | 509 | | 1.4716 | 0.1506 | 1.4988 | 0.1338 | 9.999886e-11 | 510 | | 1.4715 | 0.1294 | 1.4988 | 0.1338 | 9.9998856e-11 | 511 | | 1.4750 | 0.1600 | 1.4988 | 0.1338 | 9.999885e-11 | 512 | | 1.4700 | 0.1459 | 1.4987 | 0.1338 | 9.999884e-11 | 513 | | 1.4716 | 0.1553 | 1.4987 | 0.1338 | 9.9998836e-11 | 514 | | 1.4749 | 0.1318 | 1.4987 | 0.1338 | 9.999883e-11 | 515 | | 1.4646 | 0.1529 | 1.4987 | 0.1338 | 9.999882e-11 | 516 | | 1.4695 | 0.1482 | 1.4987 | 0.1338 | 9.9998815e-11 | 517 | | 1.4741 | 0.1341 | 1.4987 | 0.1338 | 9.999881e-11 | 518 | | 1.4748 | 0.1318 | 1.4987 | 0.1338 | 9.99988e-11 | 519 | | 1.4698 | 0.1294 | 1.4987 | 0.1338 | 9.9998794e-11 | 520 | | 1.4750 | 0.1365 | 1.4987 | 0.1338 | 9.999879e-11 | 521 | | 1.4663 | 0.1553 | 1.4987 | 0.1338 | 9.999878e-11 | 522 | | 1.4771 | 0.1412 | 1.4987 | 0.1338 | 9.999877e-11 | 523 | | 1.4859 | 0.1388 | 1.4987 | 0.1338 | 9.9998766e-11 | 524 | | 1.4818 | 0.1294 | 1.4987 | 0.1338 | 9.999876e-11 | 525 | | 1.4770 | 0.1576 | 1.4987 | 0.1338 | 9.999875e-11 | 526 | | 1.4692 | 0.1576 | 1.4987 | 0.1338 | 9.9998745e-11 | 527 | | 1.4794 | 0.1482 | 1.4987 | 0.1338 | 9.999874e-11 | 528 | | 1.4737 | 0.1529 | 1.4987 | 0.1338 | 9.999873e-11 | 529 | | 1.4730 | 0.1271 | 1.4987 | 0.1338 | 9.9998725e-11 | 530 | | 1.4738 | 0.1388 | 1.4987 | 0.1338 | 9.999872e-11 | 531 | | 1.4749 | 0.1459 | 1.4986 | 0.1338 | 9.999871e-11 | 532 | | 1.4724 | 0.1412 | 1.4986 | 0.1338 | 9.9998704e-11 | 533 | | 1.4698 | 0.1459 | 1.4986 | 0.1338 | 9.99987e-11 | 534 | | 1.4821 | 0.1247 | 1.4986 | 0.1338 | 9.999869e-11 | 535 | | 1.4726 | 0.1459 | 1.4986 | 0.1338 | 9.999868e-11 | 536 | | 1.4703 | 0.1529 | 1.4986 | 0.1338 | 9.9998676e-11 | 537 | | 1.4682 | 0.1576 | 1.4986 | 0.1338 | 9.999867e-11 | 538 | | 1.4790 | 0.1459 | 1.4986 | 0.1338 | 9.999866e-11 | 539 | | 1.4691 | 0.1647 | 1.4986 | 0.1338 | 9.9998655e-11 | 540 | | 1.4718 | 0.1271 | 1.4986 | 0.1338 | 9.999865e-11 | 541 | | 1.4690 | 0.1271 | 1.4986 | 0.1338 | 9.999864e-11 | 542 | | 1.4813 | 0.1341 | 1.4986 | 0.1338 | 9.9998634e-11 | 543 | | 1.4767 | 0.1365 | 1.4986 | 0.1338 | 9.999863e-11 | 544 | | 1.4742 | 0.1553 | 1.4986 | 0.1338 | 9.999862e-11 | 545 | | 1.4610 | 0.1412 | 1.4986 | 0.1338 | 9.9998614e-11 | 546 | | 1.4812 | 0.1482 | 1.4986 | 0.1338 | 9.999861e-11 | 547 | | 1.4643 | 0.1388 | 1.4986 | 0.1338 | 9.99986e-11 | 548 | | 1.4648 | 0.1459 | 1.4986 | 0.1338 | 9.999859e-11 | 549 | | 1.4720 | 0.1459 | 1.4986 | 0.1338 | 9.9998586e-11 | 550 | | 1.4751 | 0.1459 | 1.4985 | 0.1338 | 9.999858e-11 | 551 | | 1.4738 | 0.1341 | 1.4985 | 0.1338 | 9.999857e-11 | 552 | | 1.4729 | 0.1412 | 1.4985 | 0.1338 | 9.9998565e-11 | 553 | | 1.4799 | 0.1412 | 1.4985 | 0.1338 | 9.999856e-11 | 554 | | 1.4699 | 0.1341 | 1.4985 | 0.1338 | 9.999855e-11 | 555 | | 1.4727 | 0.1318 | 1.4985 | 0.1338 | 9.9998544e-11 | 556 | | 1.4766 | 0.1341 | 1.4985 | 0.1338 | 9.999854e-11 | 557 | | 1.4673 | 0.1435 | 1.4985 | 0.1338 | 9.999853e-11 | 558 | | 1.4669 | 0.1388 | 1.4985 | 0.1338 | 9.999852e-11 | 559 | | 1.4774 | 0.1412 | 1.4985 | 0.1338 | 9.9998516e-11 | 560 | | 1.4741 | 0.1412 | 1.4985 | 0.1338 | 9.999851e-11 | 561 | | 1.4693 | 0.1435 | 1.4985 | 0.1338 | 
9.99985e-11 | 562 | | 1.4793 | 0.1388 | 1.4985 | 0.1338 | 9.9998496e-11 | 563 | | 1.4788 | 0.1435 | 1.4985 | 0.1338 | 9.999849e-11 | 564 | | 1.4709 | 0.1624 | 1.4985 | 0.1338 | 9.999848e-11 | 565 | | 1.4732 | 0.1388 | 1.4985 | 0.1338 | 9.9998475e-11 | 566 | | 1.4734 | 0.1412 | 1.4985 | 0.1338 | 9.999847e-11 | 567 | | 1.4719 | 0.1529 | 1.4985 | 0.1338 | 9.999846e-11 | 568 | | 1.4706 | 0.1459 | 1.4985 | 0.1338 | 9.9998454e-11 | 569 | | 1.4657 | 0.1529 | 1.4984 | 0.1338 | 9.999845e-11 | 570 | | 1.4775 | 0.1459 | 1.4984 | 0.1338 | 9.999844e-11 | 571 | | 1.4719 | 0.1576 | 1.4984 | 0.1338 | 9.999843e-11 | 572 | | 1.4761 | 0.1412 | 1.4984 | 0.1338 | 9.9998426e-11 | 573 | | 1.4745 | 0.1459 | 1.4984 | 0.1338 | 9.999842e-11 | 574 | | 1.4759 | 0.1318 | 1.4984 | 0.1338 | 9.999841e-11 | 575 | | 1.4654 | 0.1482 | 1.4984 | 0.1338 | 9.9998405e-11 | 576 | | 1.4672 | 0.1600 | 1.4984 | 0.1338 | 9.99984e-11 | 577 | | 1.4761 | 0.1435 | 1.4984 | 0.1338 | 9.999839e-11 | 578 | | 1.4760 | 0.1529 | 1.4984 | 0.1338 | 9.9998385e-11 | 579 | | 1.4728 | 0.1412 | 1.4984 | 0.1338 | 9.999838e-11 | 580 | | 1.4768 | 0.1412 | 1.4984 | 0.1338 | 9.999837e-11 | 581 | | 1.4736 | 0.1412 | 1.4984 | 0.1338 | 9.9998364e-11 | 582 | | 1.4779 | 0.1318 | 1.4984 | 0.1338 | 9.999836e-11 | 583 | | 1.4745 | 0.1647 | 1.4984 | 0.1338 | 9.999835e-11 | 584 | | 1.4694 | 0.1529 | 1.4984 | 0.1338 | 9.999834e-11 | 585 | | 1.4707 | 0.1435 | 1.4984 | 0.1338 | 9.9998336e-11 | 586 | | 1.4645 | 0.1506 | 1.4984 | 0.1338 | 9.999833e-11 | 587 | | 1.4747 | 0.1388 | 1.4984 | 0.1338 | 9.999832e-11 | 588 | | 1.4683 | 0.1435 | 1.4983 | 0.1338 | 9.9998315e-11 | 589 | | 1.4733 | 0.1412 | 1.4983 | 0.1338 | 9.999831e-11 | 590 | | 1.4651 | 0.1388 | 1.4983 | 0.1338 | 9.99983e-11 | 591 | | 1.4742 | 0.1388 | 1.4983 | 0.1338 | 9.9998294e-11 | 592 | | 1.4765 | 0.1435 | 1.4983 | 0.1338 | 9.999829e-11 | 593 | | 1.4695 | 0.1553 | 1.4983 | 0.1338 | 9.999828e-11 | 594 | | 1.4696 | 0.1412 | 1.4983 | 0.1338 | 9.9998274e-11 | 595 | | 1.4733 | 0.1294 | 1.4983 | 0.1338 | 9.999827e-11 | 596 | | 1.4689 | 0.1435 | 1.4983 | 0.1338 | 9.999826e-11 | 597 | | 1.4727 | 0.1388 | 1.4983 | 0.1338 | 9.999825e-11 | 598 | | 1.4714 | 0.1553 | 1.4983 | 0.1338 | 9.9998246e-11 | 599 | | 1.4773 | 0.1318 | 1.4983 | 0.1338 | 9.999824e-11 | 600 | | 1.4743 | 0.1553 | 1.4983 | 0.1338 | 9.999823e-11 | 601 | | 1.4741 | 0.1294 | 1.4983 | 0.1338 | 9.9998225e-11 | 602 | | 1.4693 | 0.1506 | 1.4983 | 0.1338 | 9.999822e-11 | 603 | | 1.4767 | 0.1341 | 1.4983 | 0.1338 | 9.999821e-11 | 604 | | 1.4762 | 0.1459 | 1.4983 | 0.1338 | 9.9998204e-11 | 605 | | 1.4791 | 0.1271 | 1.4983 | 0.1338 | 9.99982e-11 | 606 | | 1.4745 | 0.1412 | 1.4983 | 0.1338 | 9.999819e-11 | 607 | | 1.4706 | 0.1576 | 1.4982 | 0.1338 | 9.999818e-11 | 608 | | 1.4704 | 0.1412 | 1.4982 | 0.1338 | 9.9998176e-11 | 609 | | 1.4826 | 0.1553 | 1.4982 | 0.1338 | 9.999817e-11 | 610 | | 1.4783 | 0.1247 | 1.4982 | 0.1338 | 9.999816e-11 | 611 | | 1.4783 | 0.1529 | 1.4982 | 0.1338 | 9.9998156e-11 | 612 | | 1.4799 | 0.1482 | 1.4982 | 0.1338 | 9.999815e-11 | 613 | | 1.4732 | 0.1459 | 1.4982 | 0.1338 | 9.999814e-11 | 614 | | 1.4630 | 0.1624 | 1.4982 | 0.1338 | 9.9998135e-11 | 615 | | 1.4710 | 0.1482 | 1.4982 | 0.1338 | 9.999813e-11 | 616 | | 1.4665 | 0.1318 | 1.4982 | 0.1338 | 9.999812e-11 | 617 | | 1.4760 | 0.1529 | 1.4982 | 0.1338 | 9.9998114e-11 | 618 | | 1.4696 | 0.1576 | 1.4982 | 0.1338 | 9.999811e-11 | 619 | | 1.4699 | 0.1647 | 1.4982 | 0.1338 | 9.99981e-11 | 620 | | 1.4788 | 0.1318 | 1.4982 | 0.1338 | 9.999809e-11 | 621 | | 1.4685 | 0.1435 | 1.4982 | 0.1338 | 
9.9998086e-11 | 622 | | 1.4771 | 0.1200 | 1.4982 | 0.1338 | 9.999808e-11 | 623 | | 1.4768 | 0.1435 | 1.4982 | 0.1338 | 9.999807e-11 | 624 | | 1.4726 | 0.1600 | 1.4982 | 0.1338 | 9.9998065e-11 | 625 | | 1.4660 | 0.1459 | 1.4982 | 0.1338 | 9.999806e-11 | 626 | | 1.4760 | 0.1247 | 1.4982 | 0.1338 | 9.999805e-11 | 627 | | 1.4731 | 0.1482 | 1.4981 | 0.1338 | 9.9998045e-11 | 628 | | 1.4701 | 0.1412 | 1.4981 | 0.1338 | 9.999804e-11 | 629 | | 1.4733 | 0.1412 | 1.4981 | 0.1338 | 9.999803e-11 | 630 | | 1.4682 | 0.1365 | 1.4981 | 0.1338 | 9.9998024e-11 | 631 | | 1.4741 | 0.1365 | 1.4981 | 0.1338 | 9.999802e-11 | 632 | | 1.4801 | 0.1318 | 1.4981 | 0.1338 | 9.999801e-11 | 633 | | 1.4657 | 0.1553 | 1.4981 | 0.1338 | 9.9998e-11 | 634 | | 1.4670 | 0.1482 | 1.4981 | 0.1338 | 9.9997996e-11 | 635 | | 1.4755 | 0.1435 | 1.4981 | 0.1338 | 9.999799e-11 | 636 | | 1.4753 | 0.1412 | 1.4981 | 0.1338 | 9.999798e-11 | 637 | | 1.4775 | 0.1271 | 1.4981 | 0.1338 | 9.9997975e-11 | 638 | | 1.4678 | 0.1600 | 1.4981 | 0.1338 | 9.999797e-11 | 639 | | 1.4653 | 0.1341 | 1.4981 | 0.1338 | 9.999796e-11 | 640 | | 1.4708 | 0.1671 | 1.4981 | 0.1338 | 9.9997954e-11 | 641 | | 1.4729 | 0.1200 | 1.4981 | 0.1338 | 9.999795e-11 | 642 | | 1.4726 | 0.1318 | 1.4981 | 0.1338 | 9.999794e-11 | 643 | | 1.4733 | 0.1553 | 1.4981 | 0.1338 | 9.9997934e-11 | 644 | | 1.4681 | 0.1459 | 1.4981 | 0.1338 | 9.999793e-11 | 645 | | 1.4804 | 0.1365 | 1.4981 | 0.1338 | 9.999792e-11 | 646 | | 1.4756 | 0.1506 | 1.4980 | 0.1338 | 9.999791e-11 | 647 | | 1.4690 | 0.1365 | 1.4980 | 0.1338 | 9.9997906e-11 | 648 | | 1.4788 | 0.1318 | 1.4980 | 0.1338 | 9.99979e-11 | 649 | | 1.4690 | 0.1294 | 1.4980 | 0.1338 | 9.999789e-11 | 650 | | 1.4714 | 0.1365 | 1.4980 | 0.1338 | 9.9997885e-11 | 651 | | 1.4715 | 0.1647 | 1.4980 | 0.1338 | 9.999788e-11 | 652 | | 1.4819 | 0.1388 | 1.4980 | 0.1338 | 9.999787e-11 | 653 | | 1.4689 | 0.1365 | 1.4980 | 0.1338 | 9.9997864e-11 | 654 | | 1.4725 | 0.1341 | 1.4980 | 0.1338 | 9.999786e-11 | 655 | | 1.4817 | 0.1482 | 1.4980 | 0.1338 | 9.999785e-11 | 656 | | 1.4753 | 0.1529 | 1.4980 | 0.1338 | 9.999784e-11 | 657 | | 1.4751 | 0.1435 | 1.4980 | 0.1338 | 9.9997836e-11 | 658 | | 1.4698 | 0.1459 | 1.4980 | 0.1338 | 9.999783e-11 | 659 | | 1.4745 | 0.1435 | 1.4980 | 0.1338 | 9.999782e-11 | 660 | | 1.4743 | 0.1318 | 1.4980 | 0.1338 | 9.9997816e-11 | 661 | | 1.4747 | 0.1435 | 1.4980 | 0.1338 | 9.999781e-11 | 662 | | 1.4770 | 0.1318 | 1.4980 | 0.1338 | 9.99978e-11 | 663 | | 1.4719 | 0.1388 | 1.4980 | 0.1338 | 9.9997795e-11 | 664 | | 1.4758 | 0.1247 | 1.4980 | 0.1338 | 9.999779e-11 | 665 | | 1.4790 | 0.1341 | 1.4979 | 0.1338 | 9.999778e-11 | 666 | | 1.4749 | 0.1553 | 1.4979 | 0.1338 | 9.9997774e-11 | 667 | | 1.4841 | 0.1271 | 1.4979 | 0.1338 | 9.999777e-11 | 668 | | 1.4719 | 0.1459 | 1.4979 | 0.1338 | 9.999776e-11 | 669 | | 1.4717 | 0.1529 | 1.4979 | 0.1338 | 9.999775e-11 | 670 | | 1.4717 | 0.1318 | 1.4979 | 0.1338 | 9.9997746e-11 | 671 | | 1.4686 | 0.1341 | 1.4979 | 0.1338 | 9.999774e-11 | 672 | | 1.4741 | 0.1412 | 1.4979 | 0.1338 | 9.999773e-11 | 673 | | 1.4667 | 0.1553 | 1.4979 | 0.1338 | 9.9997725e-11 | 674 | | 1.4719 | 0.1529 | 1.4979 | 0.1338 | 9.999772e-11 | 675 | | 1.4716 | 0.1600 | 1.4979 | 0.1338 | 9.999771e-11 | 676 | | 1.4615 | 0.1718 | 1.4979 | 0.1338 | 9.9997705e-11 | 677 | | 1.4726 | 0.1482 | 1.4979 | 0.1338 | 9.99977e-11 | 678 | | 1.4748 | 0.1388 | 1.4979 | 0.1338 | 9.999769e-11 | 679 | | 1.4703 | 0.1529 | 1.4979 | 0.1338 | 9.9997684e-11 | 680 | | 1.4763 | 0.1224 | 1.4979 | 0.1338 | 9.999768e-11 | 681 | | 1.4674 | 0.1576 | 1.4979 | 0.1338 | 
9.999767e-11 | 682 | | 1.4685 | 0.1482 | 1.4979 | 0.1338 | 9.999766e-11 | 683 | | 1.4791 | 0.1318 | 1.4979 | 0.1338 | 9.9997656e-11 | 684 | | 1.4715 | 0.1412 | 1.4978 | 0.1338 | 9.999765e-11 | 685 | | 1.4640 | 0.1506 | 1.4978 | 0.1338 | 9.999764e-11 | 686 | | 1.4791 | 0.1459 | 1.4978 | 0.1338 | 9.9997635e-11 | 687 | | 1.4751 | 0.1506 | 1.4978 | 0.1338 | 9.999763e-11 | 688 | | 1.4760 | 0.1459 | 1.4978 | 0.1338 | 9.999762e-11 | 689 | | 1.4727 | 0.1482 | 1.4978 | 0.1338 | 9.9997614e-11 | 690 | | 1.4657 | 0.1576 | 1.4978 | 0.1338 | 9.999761e-11 | 691 | | 1.4701 | 0.1294 | 1.4978 | 0.1338 | 9.99976e-11 | 692 | | 1.4739 | 0.1459 | 1.4978 | 0.1338 | 9.9997594e-11 | 693 | | 1.4714 | 0.1341 | 1.4978 | 0.1338 | 9.999759e-11 | 694 | | 1.4685 | 0.1435 | 1.4978 | 0.1338 | 9.999758e-11 | 695 | | 1.4755 | 0.1365 | 1.4978 | 0.1338 | 9.999757e-11 | 696 | | 1.4738 | 0.1412 | 1.4978 | 0.1338 | 9.9997566e-11 | 697 | | 1.4744 | 0.1318 | 1.4978 | 0.1338 | 9.999756e-11 | 698 | | 1.4724 | 0.1388 | 1.4978 | 0.1338 | 9.999755e-11 | 699 | | 1.4713 | 0.1506 | 1.4978 | 0.1338 | 9.9997545e-11 | 700 | | 1.4778 | 0.1412 | 1.4978 | 0.1338 | 9.999754e-11 | 701 | | 1.4713 | 0.1435 | 1.4978 | 0.1338 | 9.999753e-11 | 702 | | 1.4761 | 0.1482 | 1.4978 | 0.1338 | 9.9997524e-11 | 703 | | 1.4723 | 0.1506 | 1.4977 | 0.1338 | 9.999752e-11 | 704 | | 1.4657 | 0.1459 | 1.4977 | 0.1338 | 9.999751e-11 | 705 | | 1.4665 | 0.1553 | 1.4977 | 0.1338 | 9.99975e-11 | 706 | | 1.4657 | 0.1576 | 1.4977 | 0.1338 | 9.9997496e-11 | 707 | | 1.4746 | 0.1506 | 1.4977 | 0.1338 | 9.999749e-11 | 708 | | 1.4702 | 0.1459 | 1.4977 | 0.1338 | 9.999748e-11 | 709 | | 1.4784 | 0.1412 | 1.4977 | 0.1338 | 9.9997476e-11 | 710 | | 1.4700 | 0.1459 | 1.4977 | 0.1338 | 9.999747e-11 | 711 | | 1.4760 | 0.1412 | 1.4977 | 0.1338 | 9.999746e-11 | 712 | | 1.4777 | 0.1365 | 1.4977 | 0.1338 | 9.9997455e-11 | 713 | | 1.4673 | 0.1459 | 1.4977 | 0.1338 | 9.999745e-11 | 714 | | 1.4676 | 0.1506 | 1.4977 | 0.1338 | 9.999744e-11 | 715 | | 1.4773 | 0.1365 | 1.4977 | 0.1338 | 9.9997434e-11 | 716 | | 1.4737 | 0.1459 | 1.4977 | 0.1338 | 9.999743e-11 | 717 | | 1.4729 | 0.1529 | 1.4977 | 0.1338 | 9.999742e-11 | 718 | | 1.4777 | 0.1576 | 1.4977 | 0.1338 | 9.999741e-11 | 719 | | 1.4730 | 0.1459 | 1.4977 | 0.1338 | 9.9997406e-11 | 720 | | 1.4661 | 0.1529 | 1.4977 | 0.1338 | 9.99974e-11 | 721 | | 1.4761 | 0.1294 | 1.4977 | 0.1338 | 9.999739e-11 | 722 | | 1.4747 | 0.1506 | 1.4976 | 0.1338 | 9.9997385e-11 | 723 | | 1.4720 | 0.1459 | 1.4976 | 0.1338 | 9.999738e-11 | 724 | | 1.4616 | 0.1506 | 1.4976 | 0.1338 | 9.999737e-11 | 725 | | 1.4706 | 0.1624 | 1.4976 | 0.1338 | 9.9997365e-11 | 726 | | 1.4649 | 0.1529 | 1.4976 | 0.1338 | 9.999736e-11 | 727 | | 1.4750 | 0.1435 | 1.4976 | 0.1338 | 9.999735e-11 | 728 | | 1.4692 | 0.1271 | 1.4976 | 0.1338 | 9.9997344e-11 | 729 | | 1.4699 | 0.1529 | 1.4976 | 0.1338 | 9.999734e-11 | 730 | | 1.4699 | 0.1576 | 1.4976 | 0.1338 | 9.999733e-11 | 731 | | 1.4666 | 0.1529 | 1.4976 | 0.1338 | 9.999732e-11 | 732 | | 1.4693 | 0.1388 | 1.4976 | 0.1338 | 9.9997316e-11 | 733 | | 1.4740 | 0.1388 | 1.4976 | 0.1338 | 9.999731e-11 | 734 | | 1.4656 | 0.1459 | 1.4976 | 0.1338 | 9.99973e-11 | 735 | | 1.4661 | 0.1435 | 1.4976 | 0.1338 | 9.9997295e-11 | 736 | | 1.4737 | 0.1435 | 1.4976 | 0.1338 | 9.999729e-11 | 737 | | 1.4735 | 0.1412 | 1.4976 | 0.1338 | 9.999728e-11 | 738 | | 1.4743 | 0.1247 | 1.4976 | 0.1338 | 9.9997274e-11 | 739 | | 1.4690 | 0.1294 | 1.4976 | 0.1338 | 9.999727e-11 | 740 | | 1.4662 | 0.1459 | 1.4976 | 0.1338 | 9.999726e-11 | 741 | | 1.4682 | 0.1694 | 1.4976 | 0.1338 | 
9.9997254e-11 | 742 | | 1.4660 | 0.1600 | 1.4975 | 0.1338 | 9.999725e-11 | 743 | | 1.4690 | 0.1624 | 1.4975 | 0.1338 | 9.999724e-11 | 744 | | 1.4635 | 0.1624 | 1.4975 | 0.1338 | 9.999723e-11 | 745 | | 1.4766 | 0.1388 | 1.4975 | 0.1338 | 9.9997226e-11 | 746 | | 1.4736 | 0.1271 | 1.4975 | 0.1338 | 9.999722e-11 | 747 | | 1.4796 | 0.1176 | 1.4975 | 0.1338 | 9.999721e-11 | 748 | | 1.4689 | 0.1506 | 1.4975 | 0.1338 | 9.9997205e-11 | 749 | | 1.4771 | 0.1271 | 1.4975 | 0.1338 | 9.99972e-11 | 750 | | 1.4728 | 0.1388 | 1.4975 | 0.1338 | 9.999719e-11 | 751 | | 1.4729 | 0.1365 | 1.4975 | 0.1338 | 9.9997184e-11 | 752 | | 1.4749 | 0.1341 | 1.4975 | 0.1338 | 9.999718e-11 | 753 | | 1.4726 | 0.1271 | 1.4975 | 0.1338 | 9.999717e-11 | 754 | | 1.4748 | 0.1482 | 1.4975 | 0.1338 | 9.999716e-11 | 755 | | 1.4708 | 0.1624 | 1.4975 | 0.1338 | 9.9997156e-11 | 756 | | 1.4683 | 0.1576 | 1.4975 | 0.1338 | 9.999715e-11 | 757 | | 1.4761 | 0.1412 | 1.4975 | 0.1338 | 9.999714e-11 | 758 | | 1.4750 | 0.1318 | 1.4975 | 0.1338 | 9.9997136e-11 | 759 | | 1.4734 | 0.1247 | 1.4975 | 0.1338 | 9.999713e-11 | 760 | | 1.4670 | 0.1553 | 1.4975 | 0.1338 | 9.999712e-11 | 761 | | 1.4735 | 0.1482 | 1.4974 | 0.1338 | 9.9997115e-11 | 762 | | 1.4608 | 0.1553 | 1.4974 | 0.1338 | 9.999711e-11 | 763 | | 1.4739 | 0.1600 | 1.4974 | 0.1338 | 9.99971e-11 | 764 | | 1.4723 | 0.1388 | 1.4974 | 0.1338 | 9.9997094e-11 | 765 | | 1.4740 | 0.1482 | 1.4974 | 0.1338 | 9.999709e-11 | 766 | | 1.4706 | 0.1435 | 1.4974 | 0.1338 | 9.999708e-11 | 767 | | 1.4749 | 0.1271 | 1.4974 | 0.1338 | 9.999707e-11 | 768 | | 1.4735 | 0.1294 | 1.4974 | 0.1338 | 9.9997066e-11 | 769 | | 1.4764 | 0.1247 | 1.4974 | 0.1338 | 9.999706e-11 | 770 | | 1.4722 | 0.1412 | 1.4974 | 0.1338 | 9.999705e-11 | 771 | | 1.4776 | 0.1388 | 1.4974 | 0.1338 | 9.9997045e-11 | 772 | | 1.4704 | 0.1271 | 1.4974 | 0.1338 | 9.999704e-11 | 773 | | 1.4726 | 0.1482 | 1.4974 | 0.1338 | 9.999703e-11 | 774 | | 1.4706 | 0.1459 | 1.4974 | 0.1338 | 9.9997025e-11 | 775 | | 1.4663 | 0.1459 | 1.4974 | 0.1338 | 9.999702e-11 | 776 | | 1.4720 | 0.1365 | 1.4974 | 0.1338 | 9.999701e-11 | 777 | | 1.4655 | 0.1435 | 1.4974 | 0.1338 | 9.9997004e-11 | 778 | | 1.4741 | 0.1576 | 1.4974 | 0.1338 | 9.9997e-11 | 779 | | 1.4744 | 0.1318 | 1.4974 | 0.1338 | 9.999699e-11 | 780 | | 1.4765 | 0.1388 | 1.4973 | 0.1338 | 9.999698e-11 | 781 | | 1.4773 | 0.1412 | 1.4973 | 0.1338 | 9.9996976e-11 | 782 | | 1.4629 | 0.1506 | 1.4973 | 0.1338 | 9.999697e-11 | 783 | | 1.4703 | 0.1529 | 1.4973 | 0.1338 | 9.999696e-11 | 784 | | 1.4703 | 0.1435 | 1.4973 | 0.1338 | 9.9996955e-11 | 785 | | 1.4707 | 0.1365 | 1.4973 | 0.1338 | 9.999695e-11 | 786 | | 1.4775 | 0.1247 | 1.4973 | 0.1338 | 9.999694e-11 | 787 | | 1.4685 | 0.1600 | 1.4973 | 0.1338 | 9.9996934e-11 | 788 | | 1.4733 | 0.1459 | 1.4973 | 0.1338 | 9.999693e-11 | 789 | | 1.4815 | 0.1318 | 1.4973 | 0.1338 | 9.999692e-11 | 790 | | 1.4751 | 0.1224 | 1.4973 | 0.1338 | 9.9996914e-11 | 791 | | 1.4659 | 0.1247 | 1.4973 | 0.1338 | 9.999691e-11 | 792 | | 1.4786 | 0.1412 | 1.4973 | 0.1338 | 9.99969e-11 | 793 | | 1.4624 | 0.1553 | 1.4973 | 0.1338 | 9.999689e-11 | 794 | | 1.4695 | 0.1624 | 1.4973 | 0.1338 | 9.9996886e-11 | 795 | | 1.4753 | 0.1506 | 1.4973 | 0.1338 | 9.999688e-11 | 796 | | 1.4775 | 0.1247 | 1.4973 | 0.1338 | 9.999687e-11 | 797 | | 1.4776 | 0.1318 | 1.4973 | 0.1338 | 9.9996865e-11 | 798 | | 1.4691 | 0.1459 | 1.4973 | 0.1338 | 9.999686e-11 | 799 | | 1.4734 | 0.1341 | 1.4972 | 0.1338 | 9.999685e-11 | 800 | | 1.4737 | 0.1388 | 1.4972 | 0.1338 | 9.9996844e-11 | 801 | | 1.4672 | 0.1459 | 1.4972 | 0.1338 | 
9.999684e-11 | 802 | | 1.4789 | 0.1388 | 1.4972 | 0.1338 | 9.999683e-11 | 803 | | 1.4670 | 0.1365 | 1.4972 | 0.1338 | 9.999682e-11 | 804 | | 1.4760 | 0.1294 | 1.4972 | 0.1338 | 9.9996816e-11 | 805 | | 1.4772 | 0.1365 | 1.4972 | 0.1338 | 9.999681e-11 | 806 | | 1.4679 | 0.1412 | 1.4972 | 0.1338 | 9.99968e-11 | 807 | | 1.4724 | 0.1482 | 1.4972 | 0.1338 | 9.9996796e-11 | 808 | | 1.4758 | 0.1435 | 1.4972 | 0.1338 | 9.999679e-11 | 809 | | 1.4800 | 0.1412 | 1.4972 | 0.1338 | 9.999678e-11 | 810 | | 1.4656 | 0.1624 | 1.4972 | 0.1338 | 9.9996775e-11 | 811 | | 1.4683 | 0.1576 | 1.4972 | 0.1338 | 9.999677e-11 | 812 | | 1.4766 | 0.1365 | 1.4972 | 0.1338 | 9.999676e-11 | 813 | | 1.4799 | 0.1271 | 1.4972 | 0.1338 | 9.9996754e-11 | 814 | | 1.4712 | 0.1459 | 1.4972 | 0.1338 | 9.999675e-11 | 815 | | 1.4757 | 0.1294 | 1.4972 | 0.1338 | 9.999674e-11 | 816 | | 1.4739 | 0.1294 | 1.4972 | 0.1338 | 9.999673e-11 | 817 | | 1.4717 | 0.1459 | 1.4972 | 0.1338 | 9.9996726e-11 | 818 | | 1.4698 | 0.1506 | 1.4971 | 0.1338 | 9.999672e-11 | 819 | | 1.4713 | 0.1553 | 1.4971 | 0.1338 | 9.999671e-11 | 820 | | 1.4729 | 0.1529 | 1.4971 | 0.1338 | 9.9996705e-11 | 821 | | 1.4724 | 0.1482 | 1.4971 | 0.1338 | 9.99967e-11 | 822 | | 1.4732 | 0.1318 | 1.4971 | 0.1338 | 9.999669e-11 | 823 | | 1.4754 | 0.1365 | 1.4971 | 0.1338 | 9.9996685e-11 | 824 | | 1.4807 | 0.1388 | 1.4971 | 0.1338 | 9.999668e-11 | 825 | | 1.4737 | 0.1435 | 1.4971 | 0.1338 | 9.999667e-11 | 826 | | 1.4671 | 0.1506 | 1.4971 | 0.1338 | 9.9996664e-11 | 827 | | 1.4745 | 0.1435 | 1.4971 | 0.1338 | 9.999666e-11 | 828 | | 1.4667 | 0.1459 | 1.4971 | 0.1338 | 9.999665e-11 | 829 | | 1.4679 | 0.1435 | 1.4971 | 0.1338 | 9.999664e-11 | 830 | | 1.4668 | 0.1553 | 1.4971 | 0.1338 | 9.9996636e-11 | 831 | | 1.4755 | 0.1341 | 1.4971 | 0.1338 | 9.999663e-11 | 832 | | 1.4724 | 0.1224 | 1.4971 | 0.1338 | 9.999662e-11 | 833 | | 1.4662 | 0.1529 | 1.4971 | 0.1338 | 9.9996615e-11 | 834 | | 1.4751 | 0.1647 | 1.4971 | 0.1338 | 9.999661e-11 | 835 | | 1.4721 | 0.1506 | 1.4971 | 0.1338 | 9.99966e-11 | 836 | | 1.4751 | 0.1412 | 1.4971 | 0.1338 | 9.9996594e-11 | 837 | | 1.4733 | 0.1412 | 1.4970 | 0.1338 | 9.999659e-11 | 838 | | 1.4761 | 0.1388 | 1.4970 | 0.1338 | 9.999658e-11 | 839 | | 1.4704 | 0.1435 | 1.4970 | 0.1338 | 9.9996574e-11 | 840 | | 1.4783 | 0.1341 | 1.4970 | 0.1338 | 9.999657e-11 | 841 | | 1.4719 | 0.1459 | 1.4970 | 0.1338 | 9.999656e-11 | 842 | | 1.4625 | 0.1482 | 1.4970 | 0.1338 | 9.999655e-11 | 843 | | 1.4659 | 0.1318 | 1.4970 | 0.1338 | 9.9996546e-11 | 844 | | 1.4670 | 0.1624 | 1.4970 | 0.1338 | 9.999654e-11 | 845 | | 1.4725 | 0.1506 | 1.4970 | 0.1338 | 9.999653e-11 | 846 | | 1.4698 | 0.1271 | 1.4970 | 0.1338 | 9.9996525e-11 | 847 | | 1.4734 | 0.1529 | 1.4970 | 0.1338 | 9.999652e-11 | 848 | | 1.4781 | 0.1388 | 1.4970 | 0.1338 | 9.999651e-11 | 849 | | 1.4682 | 0.1600 | 1.4970 | 0.1338 | 9.9996504e-11 | 850 | | 1.4739 | 0.1153 | 1.4970 | 0.1338 | 9.99965e-11 | 851 | | 1.4642 | 0.1600 | 1.4970 | 0.1338 | 9.999649e-11 | 852 | | 1.4703 | 0.1553 | 1.4970 | 0.1338 | 9.999648e-11 | 853 | | 1.4602 | 0.1576 | 1.4970 | 0.1338 | 9.9996476e-11 | 854 | | 1.4613 | 0.1435 | 1.4970 | 0.1338 | 9.999647e-11 | 855 | | 1.4713 | 0.1482 | 1.4970 | 0.1338 | 9.999646e-11 | 856 | | 1.4653 | 0.1365 | 1.4970 | 0.1338 | 9.9996456e-11 | 857 | | 1.4708 | 0.1459 | 1.4969 | 0.1338 | 9.999645e-11 | 858 | | 1.4649 | 0.1506 | 1.4969 | 0.1338 | 9.999644e-11 | 859 | | 1.4663 | 0.1482 | 1.4969 | 0.1338 | 9.9996435e-11 | 860 | | 1.4643 | 0.1412 | 1.4969 | 0.1338 | 9.999643e-11 | 861 | | 1.4701 | 0.1529 | 1.4969 | 0.1338 | 
9.999642e-11 | 862 | | 1.4738 | 0.1318 | 1.4969 | 0.1338 | 9.9996414e-11 | 863 | | 1.4668 | 0.1459 | 1.4969 | 0.1338 | 9.999641e-11 | 864 | | 1.4665 | 0.1647 | 1.4969 | 0.1338 | 9.99964e-11 | 865 | | 1.4733 | 0.1271 | 1.4969 | 0.1338 | 9.999639e-11 | 866 | | 1.4776 | 0.1482 | 1.4969 | 0.1338 | 9.9996386e-11 | 867 | | 1.4639 | 0.1435 | 1.4969 | 0.1338 | 9.999638e-11 | 868 | | 1.4681 | 0.1435 | 1.4969 | 0.1338 | 9.999637e-11 | 869 | | 1.4752 | 0.1341 | 1.4969 | 0.1338 | 9.9996365e-11 | 870 | | 1.4635 | 0.1412 | 1.4969 | 0.1338 | 9.999636e-11 | 871 | | 1.4703 | 0.1412 | 1.4969 | 0.1338 | 9.999635e-11 | 872 | | 1.4803 | 0.1294 | 1.4969 | 0.1338 | 9.9996345e-11 | 873 | | 1.4737 | 0.1294 | 1.4969 | 0.1338 | 9.999634e-11 | 874 | | 1.4744 | 0.1553 | 1.4969 | 0.1338 | 9.999633e-11 | 875 | | 1.4771 | 0.1412 | 1.4969 | 0.1338 | 9.9996324e-11 | 876 | | 1.4663 | 0.1482 | 1.4968 | 0.1338 | 9.999632e-11 | 877 | | 1.4740 | 0.1224 | 1.4968 | 0.1338 | 9.999631e-11 | 878 | | 1.4758 | 0.1576 | 1.4968 | 0.1338 | 9.99963e-11 | 879 | | 1.4815 | 0.1412 | 1.4968 | 0.1338 | 9.9996296e-11 | 880 | | 1.4721 | 0.1529 | 1.4968 | 0.1338 | 9.999629e-11 | 881 | | 1.4738 | 0.1388 | 1.4968 | 0.1338 | 9.999628e-11 | 882 | | 1.4626 | 0.1529 | 1.4968 | 0.1338 | 9.9996275e-11 | 883 | | 1.4703 | 0.1365 | 1.4968 | 0.1338 | 9.999627e-11 | 884 | | 1.4682 | 0.1624 | 1.4968 | 0.1338 | 9.999626e-11 | 885 | | 1.4777 | 0.1412 | 1.4968 | 0.1338 | 9.9996254e-11 | 886 | | 1.4710 | 0.1506 | 1.4968 | 0.1338 | 9.999625e-11 | 887 | | 1.4740 | 0.1247 | 1.4968 | 0.1338 | 9.999624e-11 | 888 | | 1.4736 | 0.1459 | 1.4968 | 0.1338 | 9.9996234e-11 | 889 | | 1.4775 | 0.1341 | 1.4968 | 0.1338 | 9.999623e-11 | 890 | | 1.4711 | 0.1576 | 1.4968 | 0.1338 | 9.999622e-11 | 891 | | 1.4716 | 0.1388 | 1.4968 | 0.1338 | 9.999621e-11 | 892 | | 1.4756 | 0.1482 | 1.4968 | 0.1338 | 9.9996206e-11 | 893 | | 1.4725 | 0.1600 | 1.4968 | 0.1338 | 9.99962e-11 | 894 | | 1.4757 | 0.1459 | 1.4968 | 0.1338 | 9.999619e-11 | 895 | | 1.4709 | 0.1341 | 1.4967 | 0.1338 | 9.9996185e-11 | 896 | | 1.4695 | 0.1388 | 1.4967 | 0.1338 | 9.999618e-11 | 897 | | 1.4732 | 0.1435 | 1.4967 | 0.1338 | 9.999617e-11 | 898 | | 1.4733 | 0.1459 | 1.4967 | 0.1338 | 9.9996164e-11 | 899 | | 1.4682 | 0.1576 | 1.4967 | 0.1338 | 9.999616e-11 | 900 | | 1.4674 | 0.1435 | 1.4967 | 0.1338 | 9.999615e-11 | 901 | | 1.4713 | 0.1482 | 1.4967 | 0.1338 | 9.999614e-11 | 902 | | 1.4737 | 0.1388 | 1.4967 | 0.1338 | 9.9996136e-11 | 903 | | 1.4719 | 0.1482 | 1.4967 | 0.1338 | 9.999613e-11 | 904 | | 1.4724 | 0.1365 | 1.4967 | 0.1338 | 9.999612e-11 | 905 | | 1.4707 | 0.1529 | 1.4967 | 0.1338 | 9.9996116e-11 | 906 | | 1.4754 | 0.1341 | 1.4967 | 0.1338 | 9.999611e-11 | 907 | | 1.4783 | 0.1318 | 1.4967 | 0.1338 | 9.99961e-11 | 908 | | 1.4714 | 0.1529 | 1.4967 | 0.1338 | 9.9996095e-11 | 909 | | 1.4632 | 0.1600 | 1.4967 | 0.1338 | 9.999609e-11 | 910 | | 1.4706 | 0.1506 | 1.4967 | 0.1338 | 9.999608e-11 | 911 | | 1.4776 | 0.1553 | 1.4967 | 0.1338 | 9.9996074e-11 | 912 | | 1.4702 | 0.1318 | 1.4967 | 0.1338 | 9.999607e-11 | 913 | | 1.4824 | 0.1412 | 1.4967 | 0.1338 | 9.999606e-11 | 914 | | 1.4768 | 0.1365 | 1.4966 | 0.1338 | 9.999605e-11 | 915 | | 1.4711 | 0.1435 | 1.4966 | 0.1338 | 9.9996046e-11 | 916 | | 1.4660 | 0.1435 | 1.4966 | 0.1338 | 9.999604e-11 | 917 | | 1.4620 | 0.1506 | 1.4966 | 0.1338 | 9.999603e-11 | 918 | | 1.4723 | 0.1506 | 1.4966 | 0.1338 | 9.9996025e-11 | 919 | | 1.4741 | 0.1318 | 1.4966 | 0.1338 | 9.999602e-11 | 920 | | 1.4686 | 0.1506 | 1.4966 | 0.1338 | 9.999601e-11 | 921 | | 1.4691 | 0.1412 | 1.4966 | 0.1338 | 
9.9996005e-11 | 922 | | 1.4691 | 0.1412 | 1.4966 | 0.1338 | 9.9996e-11 | 923 | | 1.4710 | 0.1435 | 1.4966 | 0.1338 | 9.999599e-11 | 924 | | 1.4785 | 0.1435 | 1.4966 | 0.1338 | 9.9995984e-11 | 925 | | 1.4680 | 0.1412 | 1.4966 | 0.1338 | 9.999598e-11 | 926 | | 1.4718 | 0.1388 | 1.4966 | 0.1338 | 9.999597e-11 | 927 | | 1.4692 | 0.1529 | 1.4966 | 0.1338 | 9.999596e-11 | 928 | | 1.4683 | 0.1553 | 1.4966 | 0.1338 | 9.9995956e-11 | 929 | | 1.4708 | 0.1435 | 1.4966 | 0.1338 | 9.999595e-11 | 930 | | 1.4794 | 0.1388 | 1.4966 | 0.1338 | 9.999594e-11 | 931 | | 1.4638 | 0.1553 | 1.4966 | 0.1338 | 9.9995935e-11 | 932 | | 1.4755 | 0.1318 | 1.4966 | 0.1338 | 9.999593e-11 | 933 | | 1.4647 | 0.1529 | 1.4965 | 0.1338 | 9.999592e-11 | 934 | | 1.4746 | 0.1412 | 1.4965 | 0.1338 | 9.9995914e-11 | 935 | | 1.4702 | 0.1459 | 1.4965 | 0.1338 | 9.999591e-11 | 936 | | 1.4683 | 0.1506 | 1.4965 | 0.1338 | 9.99959e-11 | 937 | | 1.4708 | 0.1435 | 1.4965 | 0.1338 | 9.9995894e-11 | 938 | | 1.4755 | 0.1435 | 1.4965 | 0.1338 | 9.999589e-11 | 939 | | 1.4684 | 0.1435 | 1.4965 | 0.1338 | 9.999588e-11 | 940 | | 1.4710 | 0.1388 | 1.4965 | 0.1338 | 9.999587e-11 | 941 | | 1.4666 | 0.1694 | 1.4965 | 0.1338 | 9.9995866e-11 | 942 | | 1.4737 | 0.1365 | 1.4965 | 0.1338 | 9.999586e-11 | 943 | | 1.4687 | 0.1459 | 1.4965 | 0.1338 | 9.999585e-11 | 944 | | 1.4667 | 0.1506 | 1.4965 | 0.1338 | 9.9995845e-11 | 945 | | 1.4716 | 0.1412 | 1.4965 | 0.1338 | 9.999584e-11 | 946 | | 1.4663 | 0.1529 | 1.4965 | 0.1338 | 9.999583e-11 | 947 | | 1.4757 | 0.1459 | 1.4965 | 0.1338 | 9.9995824e-11 | 948 | | 1.4783 | 0.1318 | 1.4965 | 0.1338 | 9.999582e-11 | 949 | | 1.4712 | 0.1412 | 1.4965 | 0.1338 | 9.999581e-11 | 950 | | 1.4732 | 0.1271 | 1.4965 | 0.1338 | 9.99958e-11 | 951 | | 1.4765 | 0.1388 | 1.4965 | 0.1338 | 9.9995796e-11 | 952 | | 1.4674 | 0.1600 | 1.4965 | 0.1338 | 9.999579e-11 | 953 | | 1.4692 | 0.1341 | 1.4964 | 0.1338 | 9.999578e-11 | 954 | | 1.4707 | 0.1506 | 1.4964 | 0.1338 | 9.9995776e-11 | 955 | | 1.4730 | 0.1624 | 1.4964 | 0.1338 | 9.999577e-11 | 956 | | 1.4691 | 0.1576 | 1.4964 | 0.1338 | 9.999576e-11 | 957 | | 1.4721 | 0.1553 | 1.4964 | 0.1338 | 9.9995755e-11 | 958 | | 1.4705 | 0.1341 | 1.4964 | 0.1338 | 9.999575e-11 | 959 | | 1.4677 | 0.1435 | 1.4964 | 0.1338 | 9.999574e-11 | 960 | | 1.4727 | 0.1553 | 1.4964 | 0.1338 | 9.9995734e-11 | 961 | | 1.4690 | 0.1271 | 1.4964 | 0.1338 | 9.999573e-11 | 962 | | 1.4768 | 0.1365 | 1.4964 | 0.1338 | 9.999572e-11 | 963 | | 1.4692 | 0.1506 | 1.4964 | 0.1338 | 9.999571e-11 | 964 | | 1.4736 | 0.1624 | 1.4964 | 0.1338 | 9.9995706e-11 | 965 | | 1.4673 | 0.1529 | 1.4964 | 0.1338 | 9.99957e-11 | 966 | | 1.4750 | 0.1341 | 1.4964 | 0.1338 | 9.999569e-11 | 967 | | 1.4658 | 0.1412 | 1.4964 | 0.1338 | 9.9995685e-11 | 968 | | 1.4730 | 0.1459 | 1.4964 | 0.1338 | 9.999568e-11 | 969 | | 1.4659 | 0.1435 | 1.4964 | 0.1338 | 9.999567e-11 | 970 | | 1.4707 | 0.1553 | 1.4964 | 0.1338 | 9.9995665e-11 | 971 | | 1.4670 | 0.1388 | 1.4964 | 0.1338 | 9.999566e-11 | 972 | | 1.4720 | 0.1294 | 1.4963 | 0.1338 | 9.999565e-11 | 973 | | 1.4672 | 0.1624 | 1.4963 | 0.1338 | 9.9995644e-11 | 974 | | 1.4670 | 0.1647 | 1.4963 | 0.1338 | 9.999564e-11 | 975 | | 1.4688 | 0.1600 | 1.4963 | 0.1338 | 9.999563e-11 | 976 | | 1.4673 | 0.1341 | 1.4963 | 0.1338 | 9.999562e-11 | 977 | | 1.4682 | 0.1365 | 1.4963 | 0.1338 | 9.9995616e-11 | 978 | | 1.4664 | 0.1600 | 1.4963 | 0.1338 | 9.999561e-11 | 979 | | 1.4728 | 0.1388 | 1.4963 | 0.1338 | 9.99956e-11 | 980 | | 1.4704 | 0.1341 | 1.4963 | 0.1338 | 9.9995595e-11 | 981 | | 1.4721 | 0.1506 | 1.4963 | 0.1338 | 
9.999559e-11 | 982 | | 1.4660 | 0.1388 | 1.4963 | 0.1338 | 9.999558e-11 | 983 | | 1.4675 | 0.1365 | 1.4963 | 0.1338 | 9.9995574e-11 | 984 | | 1.4641 | 0.1553 | 1.4963 | 0.1338 | 9.999557e-11 | 985 | | 1.4780 | 0.1435 | 1.4963 | 0.1338 | 9.999556e-11 | 986 | | 1.4676 | 0.1365 | 1.4963 | 0.1338 | 9.9995554e-11 | 987 | | 1.4715 | 0.1435 | 1.4963 | 0.1338 | 9.999555e-11 | 988 | | 1.4707 | 0.1435 | 1.4963 | 0.1338 | 9.999554e-11 | 989 | | 1.4668 | 0.1506 | 1.4963 | 0.1338 | 9.999553e-11 | 990 | | 1.4766 | 0.1388 | 1.4963 | 0.1338 | 9.9995526e-11 | 991 | | 1.4772 | 0.1224 | 1.4962 | 0.1338 | 9.999552e-11 | 992 | | 1.4703 | 0.1412 | 1.4962 | 0.1338 | 9.999551e-11 | 993 | | 1.4681 | 0.1576 | 1.4962 | 0.1338 | 9.9995505e-11 | 994 | | 1.4767 | 0.1365 | 1.4962 | 0.1338 | 9.99955e-11 | 995 | | 1.4702 | 0.1318 | 1.4962 | 0.1338 | 9.999549e-11 | 996 | | 1.4753 | 0.1294 | 1.4962 | 0.1338 | 9.9995484e-11 | 997 | | 1.4696 | 0.1553 | 1.4962 | 0.1338 | 9.999548e-11 | 998 | | 1.4794 | 0.1435 | 1.4962 | 0.1338 | 9.999547e-11 | 999 | ### Framework versions - Transformers 4.29.0.dev0 - TensorFlow 2.9.1 - Datasets 2.8.0 - Tokenizers 0.13.2
Bharathdamu/wav2vec2-large-xls-r-300m-hindi
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "dataset:common_voice", "transformers", "generated_from_trainer", "license:apache-2.0" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - matthews_correlation model-index: - name: bert-base-uncased-finetuned-cola results: - task: name: Text Classification type: text-classification dataset: name: glue type: glue config: cola split: validation args: cola metrics: - name: Matthews Correlation type: matthews_correlation value: 0.518818601771926 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-finetuned-cola This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the glue dataset. It achieves the following results on the evaluation set: - Loss: 0.4610 - Matthews Correlation: 0.5188 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Matthews Correlation | |:-------------:|:-----:|:----:|:---------------:|:--------------------:| | 0.4985 | 1.0 | 535 | 0.4610 | 0.5188 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
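A minimal inference sketch for a checkpoint like this one, assuming it is published under a hypothetical `your-username/bert-base-uncased-finetuned-cola` repo id and keeps the default `LABEL_0`/`LABEL_1` label names (neither is stated in the card):

```python
from transformers import pipeline
from sklearn.metrics import matthews_corrcoef

# Hypothetical repo id -- substitute the actual namespace of the uploaded checkpoint.
clf = pipeline("text-classification", model="your-username/bert-base-uncased-finetuned-cola")

sentences = ["The book was written by John.", "The book was wrote by John."]
preds = [clf(s)[0]["label"] for s in sentences]

# Matthews correlation (the metric reported above) compares predicted and gold
# acceptability labels; here 1 = acceptable, 0 = unacceptable (an assumption).
gold = [1, 0]
pred_ids = [1 if label == "LABEL_1" else 0 for label in preds]
print(preds, matthews_corrcoef(gold, pred_ids))
```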
BigSalmon/Flowberta
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
---
license: mit
library_name: sklearn
tags:
- sklearn
- skops
- text-classification
model_format: pickle
model_file: skops-_0q41qzv.pkl
---

# Model description

This is a `RandomForestClassifier` model trained on the JeVeuxAider dataset. As input, the model takes text embeddings encoded with camembert-base (768 tokens).

## Intended uses & limitations

This model is not ready to be used in production.

## Training Procedure

[More Information Needed]

### Hyperparameters

<details> <summary> Click to expand </summary>

| Hyperparameter                  | Value |
|---------------------------------|-------|
| memory                          | |
| steps                           | [('scaler', StandardScaler()), ('pca', PCA(n_components=356)), ('rfc', RandomForestClassifier(class_weight='balanced', n_jobs=-1, random_state=42))] |
| verbose                         | False |
| scaler                          | StandardScaler() |
| pca                             | PCA(n_components=356) |
| rfc                             | RandomForestClassifier(class_weight='balanced', n_jobs=-1, random_state=42) |
| scaler__copy                    | True |
| scaler__with_mean               | True |
| scaler__with_std                | True |
| pca__copy                       | True |
| pca__iterated_power             | auto |
| pca__n_components               | 356 |
| pca__n_oversamples              | 10 |
| pca__power_iteration_normalizer | auto |
| pca__random_state               | |
| pca__svd_solver                 | auto |
| pca__tol                        | 0.0 |
| pca__whiten                     | False |
| rfc__bootstrap                  | True |
| rfc__ccp_alpha                  | 0.0 |
| rfc__class_weight               | balanced |
| rfc__criterion                  | gini |
| rfc__max_depth                  | |
| rfc__max_features               | sqrt |
| rfc__max_leaf_nodes             | |
| rfc__max_samples                | |
| rfc__min_impurity_decrease      | 0.0 |
| rfc__min_samples_leaf           | 1 |
| rfc__min_samples_split          | 2 |
| rfc__min_weight_fraction_leaf   | 0.0 |
| rfc__n_estimators               | 100 |
| rfc__n_jobs                     | -1 |
| rfc__oob_score                  | False |
| rfc__random_state               | 42 |
| rfc__verbose                    | 0 |
| rfc__warm_start                 | False |

</details>

### Model Plot

`Pipeline(steps=[('scaler', StandardScaler()), ('pca', PCA(n_components=356)), ('rfc', RandomForestClassifier(class_weight='balanced', n_jobs=-1, random_state=42))])`

## Evaluation Results

| Metric   | Value    |
|----------|----------|
| accuracy | 0.964555 |
| f1 score | 0.959138 |

### Confusion Matrix

![Confusion Matrix](confusion_matrix.png)

# How to Get Started with the Model

[More Information Needed]

# Model Card Authors

huynhdoo

# Model Card Contact

You can contact the model card authors through the following channels: [More Information Needed]

# Citation

**BibTeX**

```
@inproceedings{...,year={2023}}
```

# get_started_code

```python
import pickle

with open(pkl_filename, 'rb') as file:
    pipe = pickle.load(file)
```
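For reference, a sketch that rebuilds the same estimator from the hyperparameter table above; `X_train`/`y_train` stand in for the camembert-base embeddings and JeVeuxAider labels, which are not shipped with this card:

```python
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier

# Same steps and settings as listed under "Hyperparameters".
pipe = Pipeline(steps=[
    ("scaler", StandardScaler()),
    ("pca", PCA(n_components=356)),
    ("rfc", RandomForestClassifier(class_weight="balanced", n_jobs=-1, random_state=42)),
])

# X_train: (n_samples, 768) camembert-base embeddings, y_train: the corresponding labels.
# pipe.fit(X_train, y_train)
```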
BigSalmon/GPTNeo350MInformalToFormalLincoln2
[ "pytorch", "gpt_neo", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- tags: - generated_from_trainer datasets: - go_emotions metrics: - accuracy - f1 model-index: - name: goemotions_bertspanish_finetunig_e results: - task: name: Text Classification type: text-classification dataset: name: go_emotions type: go_emotions config: simplified split: test args: simplified metrics: - name: Accuracy type: accuracy value: 0.4 - name: F1 type: f1 value: 0.2777912523419085 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # goemotions_bertspanish_finetunig_e This model is a fine-tuned version of [dccuchile/bert-base-spanish-wwm-cased](https://huggingface.co/dccuchile/bert-base-spanish-wwm-cased) on the go_emotions dataset. It achieves the following results on the evaluation set: - Loss: 3.3859 - Accuracy: 0.4 - F1: 0.2778 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-06 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 16 ### Training results ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
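A sketch of how the hyperparameters listed above map onto `transformers.TrainingArguments`; the actual training script is not included in the card, and the Adam betas/epsilon shown are already the library defaults:

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="goemotions_bertspanish_finetunig_e",  # assumed output directory
    learning_rate=5e-6,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=16,
)
```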
BigSalmon/GPTNeo350MInformalToFormalLincoln3
[ "pytorch", "gpt_neo", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
This is where you write how to use? Test! Okay it works :) ## TODO * [ ] Minimal working code * [ ] Dataset description
BigSalmon/InformalToFormalLincoln19
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
--- language: en tags: - multivae license: apache-2.0 --- ### Downloading this model from the Hub This model was trained with multivae. It can be downloaded or reloaded using the method `load_from_hf_hub` ```python >>> from multivae.models import AutoModel >>> model = AutoModel.load_from_hf_hub(hf_hub_path="your_hf_username/repo_name") ```
BigSalmon/TS3
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible", "has_space" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
LoRA fine-tune trained on this [dataset](https://huggingface.co/datasets/ehartford/WizardLM_alpaca_evol_instruct_70k_unfiltered). Prompt template: ```text Below is an instruction that describes a task. Write a response that appropriately completes the request. ### Instruction: {data_point["instruction"]} ### Response: {data_point["output"]} ``` Quantization command: ```bash CUDA_VISIBLE_DEVICES=0 python llama.py model c4 --wbits 4 --true-sequential --groupsize 128 --save_safetensors 4bit-128g.safetensors ```
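For completeness, a hedged sketch of attaching a LoRA adapter like this one to its base model with peft and filling the prompt template above; both repo ids are placeholders, since the card names neither the base checkpoint nor the adapter repo.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

BASE_MODEL = "your-base-llama-checkpoint"  # placeholder: base model not named in the card
ADAPTER_ID = "your-username/this-lora"     # placeholder: adapter repo or local path

tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)
model = AutoModelForCausalLM.from_pretrained(BASE_MODEL)
model = PeftModel.from_pretrained(model, ADAPTER_ID)  # attach the LoRA weights

# Fill the Alpaca-style template from the card.
prompt = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
    "### Instruction:\nSummarize the plot of Hamlet in one sentence.\n\n"
    "### Response:\n"
)
inputs = tokenizer(prompt, return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```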
Biniam/en_ti_translate
[ "pytorch", "marian", "text2text-generation", "transformers", "translation", "autotrain_compatible" ]
translation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
2023-05-02T16:43:42Z
--- widget: structuredData: SepalLengthCm: - 5.1 - 4.9 - 6.2 SepalWidthCm: - 3.5 - 3 - 3.4 PetalLengthCm: - 1.4 - 1.4 - 5.4 PetalWidthCm: - 0.2 - 0.2 - 2.3 tags: - tabular-classification - sklearn --- ### How to use ```python import joblib from huggingface_hub import hf_hub_download from sklearn.datasets import load_iris REPO_ID = "d2i-pti-iu/test_svc_model" FILENAME = "iris_svm.joblib" model = joblib.load(hf_hub_download(repo_id=REPO_ID, filename=FILENAME)) iris = load_iris() X = iris.data[:3] Y = iris.target[:3] labels = model.predict(X) ``` #### Eval ```python model.score(X, Y) ```
BogdanKuloren/continual-learning-paper-embeddings-model
[ "pytorch", "mpnet", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "MPNetModel" ], "model_type": "mpnet", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
2023-05-02T17:16:16Z
--- license: mit tags: - generated_from_keras_callback model-index: - name: jva-missions-report-v2 results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # jva-missions-report-v2 This model is a fine-tuned version of [camembert-base](https://huggingface.co/camembert-base) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.1873 - Epoch: 0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'weight_decay': None, 'clipnorm': None, 'global_clipnorm': None, 'clipvalue': None, 'use_ema': False, 'ema_momentum': 0.99, 'ema_overwrite_frequency': None, 'jit_compile': False, 'is_legacy_optimizer': False, 'learning_rate': 0.001, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Epoch | |:----------:|:-----:| | 0.1873 | 0 | ### Framework versions - Transformers 4.28.1 - TensorFlow 2.12.0 - Datasets 2.12.0 - Tokenizers 0.13.3
BossLee/t5-gec
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
6
2023-05-02T17:23:56Z
--- license: cc-by-4.0 tags: - generated_from_trainer metrics: - f1 - recall - accuracy - precision model-index: - name: bertin-roberta-fine-tuned-text-classification-SL-data-augmentation-test results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bertin-roberta-fine-tuned-text-classification-SL-data-augmentation-test This model is a fine-tuned version of [bertin-project/bertin-roberta-base-spanish](https://huggingface.co/bertin-project/bertin-roberta-base-spanish) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.7374 - F1: 0.1580 - Recall: 0.3233 - Accuracy: 0.3233 - Precision: 0.1045 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | Recall | Accuracy | Precision | |:-------------:|:-----:|:-----:|:---------------:|:------:|:------:|:--------:|:---------:| | 2.7132 | 1.0 | 6530 | 2.7463 | 0.1580 | 0.3233 | 0.3233 | 0.1045 | | 2.7441 | 2.0 | 13060 | 2.7423 | 0.1580 | 0.3233 | 0.3233 | 0.1045 | | 2.7328 | 3.0 | 19590 | 2.7365 | 0.1580 | 0.3233 | 0.3233 | 0.1045 | | 2.7464 | 4.0 | 26120 | 2.7374 | 0.1580 | 0.3233 | 0.3233 | 0.1045 | | 2.7178 | 5.0 | 32650 | 2.7374 | 0.1580 | 0.3233 | 0.3233 | 0.1045 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
BotterHax/DialoGPT-small-harrypotter
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
2023-05-02T17:24:12Z
--- license: other tags: - vision - image-segmentation - generated_from_trainer model-index: - name: segformer-b0-finetuned-human-parsing results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # segformer-b0-finetuned-human-parsing This model is a fine-tuned version of [nvidia/mit-b0](https://huggingface.co/nvidia/mit-b0) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.9476 - Mean Iou: 0.0726 - Mean Accuracy: 0.1221 - Overall Accuracy: 0.3575 - Accuracy Background: nan - Accuracy Hat: 0.0048 - Accuracy Hair: 0.4813 - Accuracy Sunglasses: 0.0 - Accuracy Upper-clothes: 0.9405 - Accuracy Skirt: 0.0000 - Accuracy Pants: 0.0631 - Accuracy Dress: 0.1031 - Accuracy Belt: 0.0 - Accuracy Left-shoe: 0.0011 - Accuracy Right-shoe: 0.0010 - Accuracy Face: 0.4406 - Accuracy Left-leg: 0.0291 - Accuracy Right-leg: 0.0 - Accuracy Left-arm: 0.0 - Accuracy Right-arm: 0.0001 - Accuracy Bag: 0.0114 - Accuracy Scarf: 0.0 - Iou Background: 0.0 - Iou Hat: 0.0043 - Iou Hair: 0.4221 - Iou Sunglasses: 0.0 - Iou Upper-clothes: 0.3239 - Iou Skirt: 0.0000 - Iou Pants: 0.0559 - Iou Dress: 0.0728 - Iou Belt: 0.0 - Iou Left-shoe: 0.0011 - Iou Right-shoe: 0.0009 - Iou Face: 0.3872 - Iou Left-leg: 0.0271 - Iou Right-leg: 0.0 - Iou Left-arm: 0.0 - Iou Right-arm: 0.0001 - Iou Bag: 0.0106 - Iou Scarf: 0.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 6e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Mean Iou | Mean Accuracy | Overall Accuracy | Accuracy Background | Accuracy Hat | Accuracy Hair | Accuracy Sunglasses | Accuracy Upper-clothes | Accuracy Skirt | Accuracy Pants | Accuracy Dress | Accuracy Belt | Accuracy Left-shoe | Accuracy Right-shoe | Accuracy Face | Accuracy Left-leg | Accuracy Right-leg | Accuracy Left-arm | Accuracy Right-arm | Accuracy Bag | Accuracy Scarf | Iou Background | Iou Hat | Iou Hair | Iou Sunglasses | Iou Upper-clothes | Iou Skirt | Iou Pants | Iou Dress | Iou Belt | Iou Left-shoe | Iou Right-shoe | Iou Face | Iou Left-leg | Iou Right-leg | Iou Left-arm | Iou Right-arm | Iou Bag | Iou Scarf | |:-------------:|:-----:|:----:|:---------------:|:--------:|:-------------:|:----------------:|:-------------------:|:------------:|:-------------:|:-------------------:|:----------------------:|:--------------:|:--------------:|:--------------:|:-------------:|:------------------:|:-------------------:|:-------------:|:-----------------:|:------------------:|:-----------------:|:------------------:|:------------:|:--------------:|:--------------:|:-------:|:--------:|:--------------:|:-----------------:|:---------:|:---------:|:---------:|:--------:|:-------------:|:--------------:|:--------:|:------------:|:-------------:|:------------:|:-------------:|:-------:|:---------:| | 2.5768 | 0.4 | 20 | 2.7812 | 0.0726 | 0.1332 | 0.2876 | nan | 0.0178 | 0.3204 | 0.0004 | 0.5548 | 0.0004 | 0.2555 | 0.2373 | 0.0 | 0.0103 | 0.0003 | 0.5637 | 0.0287 | 0.0302 | 0.0001 | 0.0008 | 0.2435 | 0.0 | 0.0 | 0.0166 | 0.2759 | 
0.0001 | 0.2781 | 0.0004 | 0.1710 | 0.1295 | 0.0 | 0.0098 | 0.0003 | 0.3251 | 0.0260 | 0.0248 | 0.0001 | 0.0007 | 0.0491 | 0.0 | | 2.2093 | 0.8 | 40 | 2.5166 | 0.0563 | 0.1052 | 0.3288 | nan | 0.0 | 0.1994 | 0.0 | 0.9447 | 0.0015 | 0.0435 | 0.1164 | 0.0 | 0.0008 | 0.0000 | 0.4655 | 0.0007 | 0.0003 | 0.0 | 0.0 | 0.0153 | 0.0 | 0.0 | 0.0 | 0.1946 | 0.0 | 0.3037 | 0.0015 | 0.0417 | 0.0842 | 0.0 | 0.0008 | 0.0000 | 0.3726 | 0.0007 | 0.0003 | 0.0 | 0.0 | 0.0124 | 0.0 | | 1.8804 | 1.2 | 60 | 2.0209 | 0.0632 | 0.1110 | 0.3374 | nan | 0.0087 | 0.3724 | 0.0 | 0.9475 | 0.0014 | 0.0162 | 0.0528 | 0.0 | 0.0001 | 0.0008 | 0.4257 | 0.0561 | 0.0001 | 0.0 | 0.0 | 0.0055 | 0.0 | 0.0 | 0.0077 | 0.3472 | 0.0 | 0.3086 | 0.0014 | 0.0156 | 0.0403 | 0.0 | 0.0001 | 0.0008 | 0.3597 | 0.0515 | 0.0001 | 0.0 | 0.0 | 0.0052 | 0.0 | | 1.8776 | 1.6 | 80 | 2.0016 | 0.0665 | 0.1154 | 0.3454 | nan | 0.0056 | 0.4172 | 0.0 | 0.9412 | 0.0000 | 0.0490 | 0.0697 | 0.0 | 0.0002 | 0.0006 | 0.4349 | 0.0329 | 0.0000 | 0.0 | 0.0000 | 0.0100 | 0.0 | 0.0 | 0.0048 | 0.3791 | 0.0 | 0.3138 | 0.0000 | 0.0438 | 0.0542 | 0.0 | 0.0002 | 0.0006 | 0.3608 | 0.0304 | 0.0000 | 0.0 | 0.0000 | 0.0093 | 0.0 | | 1.8471 | 2.0 | 100 | 1.9476 | 0.0726 | 0.1221 | 0.3575 | nan | 0.0048 | 0.4813 | 0.0 | 0.9405 | 0.0000 | 0.0631 | 0.1031 | 0.0 | 0.0011 | 0.0010 | 0.4406 | 0.0291 | 0.0 | 0.0 | 0.0001 | 0.0114 | 0.0 | 0.0 | 0.0043 | 0.4221 | 0.0 | 0.3239 | 0.0000 | 0.0559 | 0.0728 | 0.0 | 0.0011 | 0.0009 | 0.3872 | 0.0271 | 0.0 | 0.0 | 0.0001 | 0.0106 | 0.0 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
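No inference snippet accompanies the segmentation card above; a hedged sketch of running the fine-tuned SegFormer on a single image is given below. The repo id is a placeholder and the image path is illustrative.

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, SegformerForSemanticSegmentation

REPO_ID = "your-username/segformer-b0-finetuned-human-parsing"  # placeholder repo id

processor = AutoImageProcessor.from_pretrained(REPO_ID)
model = SegformerForSemanticSegmentation.from_pretrained(REPO_ID)

image = Image.open("person.jpg")  # any RGB photo of a person
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape: (1, num_labels, height/4, width/4)
pred_mask = logits.argmax(dim=1)[0]  # per-pixel class ids (hat, hair, upper-clothes, ...)
```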
Branex/gpt-neo-2.7B
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 datasets: - OpenAssistant/oasst1 language: - en metrics: - accuracy library_name: adapter-transformers pipeline_tag: text-classification ---
Brinah/1
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1623167114075832323/FeVdguyt_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">CARMAXLLA</div> <div style="text-align: center; font-size: 14px;">@carmaxlla</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from CARMAXLLA. | Data | CARMAXLLA | | --- | --- | | Tweets downloaded | 3139 | | Retweets | 409 | | Short tweets | 574 | | Tweets kept | 2156 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/t435hmy0/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @carmaxlla's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/mduks720) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/mduks720/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/carmaxlla') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Broadus20/DialoGPT-small-harrypotter
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: bert-finetuned-squad results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-finetuned-squad This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the squad dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
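Since the SQuAD card above omits a usage example, here is a hedged sketch with the question-answering pipeline; the repo id is a placeholder because the card does not state the owner namespace.

```python
from transformers import pipeline

# Sketch only: placeholder repo id.
qa = pipeline("question-answering", model="your-username/bert-finetuned-squad")
result = qa(
    question="Which dataset was used for fine-tuning?",
    context="The model was fine-tuned on the SQuAD question answering dataset.",
)
print(result["answer"], result["score"])
```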
Buntan/bert-finetuned-ner
[ "pytorch", "tensorboard", "bert", "token-classification", "dataset:conll2003", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: transcriber-t5-v6 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # transcriber-t5-v6 This model is a fine-tuned version of [t5-base](https://huggingface.co/t5-base) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0466 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 20 - eval_batch_size: 20 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 0.129 | 0.29 | 500 | 0.1034 | | 0.0962 | 0.59 | 1000 | 0.0758 | | 0.0754 | 0.88 | 1500 | 0.0611 | | 0.097 | 1.18 | 2000 | 0.0562 | | 0.034 | 1.47 | 2500 | 0.0502 | | 0.0679 | 1.77 | 3000 | 0.0466 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
CAMeL-Lab/bert-base-arabic-camelbert-ca-ner
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
85
null
--- license: unknown inference: false tags: - mlconsole - tabular-classification library_name: mlconsole metrics: - accuracy - loss datasets: - production.csv_(1) model-index: - name: production_test results: - task: type: tabular-classification name: tabular-classification dataset: type: production.csv_(1) name: production.csv_(1) metrics: - type: accuracy name: Accuracy value: 0 - type: loss name: Model loss value: 0 --- # classification model trained on "production.csv_(1)" 🤖 [Load and use this model](https://mlconsole.com/model/hf/Danasoumoh/production_test) in one click. 🧑‍💻 [Train your own model](https://mlconsole.com) on ML Console.
CAMeL-Lab/bert-base-arabic-camelbert-ca-pos-msa
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
71
null
--- library_name: stable-baselines3 tags: - AntBulletEnv-v0 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: A2C results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: AntBulletEnv-v0 type: AntBulletEnv-v0 metrics: - type: mean_reward value: 1499.67 +/- 82.60 name: mean_reward verified: false --- # **A2C** Agent playing **AntBulletEnv-v0** This is a trained model of a **A2C** agent playing **AntBulletEnv-v0** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
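The usage section above is left as a TODO; the sketch below shows the usual huggingface_sb3 loading pattern for such checkpoints. The repo id and filename are placeholders, since the card gives neither.

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import A2C

# Placeholders: the card does not state the repo id or the checkpoint filename.
checkpoint_path = load_from_hub(
    repo_id="your-username/a2c-AntBulletEnv-v0",
    filename="a2c-AntBulletEnv-v0.zip",
)
model = A2C.load(checkpoint_path)
obs = model.observation_space.sample()
action, _ = model.predict(obs, deterministic=True)
```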
CAMeL-Lab/bert-base-arabic-camelbert-da-ner
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
42
null
--- license: mit datasets: - wikipedia language: - it widget: - text: "milano è una <mask> dell'italia" example_title: "Example 1" - text: "giacomo leopardi è stato uno dei più grandi <mask> del classicismo italiano" example_title: "Example 2" - text: "la pizza è un noto simbolo della <mask> gastronomica italiana" example_title: "Example 3" --- -------------------------------------------------------------------------------------------------- <body> <span class="vertical-text" style="background-color:lightgreen;border-radius: 3px;padding: 3px;"> </span> <br> <span class="vertical-text" style="background-color:orange;border-radius: 3px;padding: 3px;">  </span> <br> <span class="vertical-text" style="background-color:lightblue;border-radius: 3px;padding: 3px;">    Model: FLARE 🔥</span> <br> <span class="vertical-text" style="background-color:tomato;border-radius: 3px;padding: 3px;">    Lang: IT</span> <br> <span class="vertical-text" style="background-color:lightgrey;border-radius: 3px;padding: 3px;">  </span> <br> <span class="vertical-text" style="background-color:#CF9FFF;border-radius: 3px;padding: 3px;"> </span> </body> -------------------------------------------------------------------------------------------------- <h3>Introduction</h3> This model is a <b>lightweight</b> and uncased version of <b>MiniLM</b> <b>[1]</b> for the <b>Italian</b> language. Its <b>17M parameters</b> and <b>67MB</b> size make it <b>85% lighter</b> than a typical mono-lingual BERT model. It is ideal when memory consumption and execution speed are critical while maintaining high-quality results. <h3>Model description</h3> The model builds on <b>mMiniLMv2</b> <b>[1]</b> (from Microsoft: [L6xH384 mMiniLMv2](https://github.com/microsoft/unilm/tree/master/minilm)) as a starting point, focusing it on the Italian language while at the same time turning it into an uncased model by modifying the embedding layer (as in <b>[2]</b>, but computing document-level frequencies over the <b>Wikipedia</b> dataset and setting a frequency threshold of 0.1%), which brings a considerable reduction in the number of parameters. To compensate for the deletion of cased tokens, which now forces the model to exploit lowercase representations of words previously capitalized, the model has been further pre-trained on the Italian split of the [Wikipedia](https://huggingface.co/datasets/wikipedia) dataset, using the <b>whole word masking [3]</b> technique to make it more robust to the new uncased representations. The resulting model has 17M parameters, a vocabulary of 14.610 tokens, and a size of 67MB, which makes it <b>85% lighter</b> than a typical mono-lingual BERT model and 75% lighter than a standard mono-lingual DistilBERT model. <h3>Training procedure</h3> The model has been trained for <b>masked language modeling</b> on the Italian <b>Wikipedia</b> (~3GB) dataset for 10K steps, using the AdamW optimizer, with a batch size of 512 (obtained through 128 gradient accumulation steps), a sequence length of 512, and a linearly decaying learning rate starting from 5e-5. The training has been performed using <b>dynamic masking</b> between epochs and exploiting the <b>whole word masking</b> technique. <h3>Performances</h3> The following metrics have been computed on the Part of Speech Tagging and Named Entity Recognition tasks, using the <b>UD Italian ISDT</b> and <b>WikiNER</b> datasets, respectively. The PoST model has been trained for 5 epochs, and the NER model for 3 epochs, both with a constant learning rate, fixed at 1e-5. 
For Part of Speech Tagging, the metrics have been computed on the default test set provided with the dataset, while for Named Entity Recognition the metrics have been computed with a 5-fold cross-validation | Task | Recall | Precision | F1 | | ------ | ------ | ------ | ------ | | Part of Speech Tagging | 95.64 | 95.32 | 95.45 | | Named Entity Recognition | 82.27 | 80.64 | 81.29 | The metrics have been computed at the token level and macro-averaged over the classes. <h3>Demo</h3> You can try the model online (fine-tuned on named entity recognition) using this web app: https://huggingface.co/spaces/osiria/flare-it-demo <h3>Quick usage</h3> ```python from transformers import AutoTokenizer, XLMRobertaForMaskedLM from transformers import pipeline tokenizer = AutoTokenizer.from_pretrained("osiria/flare-it") model = XLMRobertaForMaskedLM.from_pretrained("osiria/flare-it") pipeline_mlm = pipeline(task="fill-mask", model=model, tokenizer=tokenizer) ``` <h3>Limitations</h3> This lightweight model has been further pre-trained on Wikipedia, so it's particularly suitable as an agile analyzer for large volumes of natively digital text from the world wide web, written in a correct and fluent form (like wikis, web pages, news, etc.). However, it may show limitations when it comes to chaotic text, containing errors and slang expressions (like social media posts) or when it comes to domain-specific text (like medical, financial or legal content). <h3>References</h3> [1] https://arxiv.org/abs/2012.15828 [2] https://arxiv.org/abs/2010.05609 [3] https://arxiv.org/abs/1906.08101 <h3>License</h3> The model is released under <b>MIT</b> license
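As a small follow-up to the quick-usage snippet above, a hedged example of actually invoking the fill-mask pipeline on one of the card's widget sentences:

```python
from transformers import pipeline

# Uses the checkpoint id from the card; top_k limits the number of suggestions returned.
fill_mask = pipeline("fill-mask", model="osiria/flare-it")
for candidate in fill_mask("milano è una <mask> dell'italia", top_k=3):
    print(candidate["token_str"], round(candidate["score"], 3))
```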
CAMeL-Lab/bert-base-arabic-camelbert-da-pos-egy
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
32
null
--- inference: parameters: do_sample: true max_length: 384 top_p: 0.9 repetition_penalty: 1.0 language: - en license: mit tags: - "text2text generation" task: name: "lyrics interpretation" type: "text2text generation" widget: - text: "Explain: \nLoving him is like driving a new Maserati down a dead end street\nFaster than the wind, passionate as sin, ending so suddenly\nLoving him is like trying to change your mind\nOnce you're already flying through the free fall\nLike the colors in autumn, so bright, just before they lose it all\n\nLosing him was blue, like I'd never known\nMissing him was dark gray, all alone\nForgetting him was like trying to know\nSomebody you never met\nBut loving him was red\nLoving him was red\n\nTouching him was like realizing all you ever wanted\nWas right there in front of you\nMemorizing him was as easy as knowing all the words\nTo your old favorite song\nFighting with him was like trying to solve a crossword\nAnd realizing there's no right answer\nRegretting him was like wishing you never found out\nThat love could be that strong\n\nLosing him was blue, like I'd never known\nMissing him was dark gray, all alone\nForgetting him was like trying to know\nSomebody you never met\nBut loving him was red\nOh, red\nBurning red\n\nRemembering him comes in flashbacks and echoes\nTell myself it's time now gotta let go\nBut moving on from him is impossible\nWhen I still see it all in my head\nIn burning red\nBurning, it was red\n\nOh, losing him was blue, like I'd never\nnown\nMissing him was dark gray, all alone\nForgetting him was like trying to know\nSomebody you never met\n'Cause loving him was red\nYeah, yeah, red\nBurning red\n\nAnd that's why he's spinning 'round in my head\nComes back to me, burning red\nYeah, yeah\nHis love was like driving a new Maserati down a dead end street" example_title: "Red - Taylor Swift" --- # Overview This pilot hub aims to test whether a flan-t5-base can effectively automate poem interpretation. To use the hub, simply paste in any poem of interest and see its meaning. Please begin your request with the prompt, 'Explain: '.
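A hedged sketch of querying such a checkpoint with the "Explain: " prefix described above; the repo id is a placeholder, and the generation settings mirror the card's inference parameters.

```python
from transformers import pipeline

# Placeholder repo id; generation settings taken from the card's inference block.
interpreter = pipeline("text2text-generation", model="your-username/lyrics-interpreter")
lyrics = "Loving him is like driving a new Maserati down a dead end street..."
result = interpreter(
    "Explain: \n" + lyrics,
    do_sample=True, max_length=384, top_p=0.9, repetition_penalty=1.0,
)
print(result[0]["generated_text"])
```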
CAMeL-Lab/bert-base-arabic-camelbert-da-pos-msa
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- inference: parameters: do_sample: true max_length: 384 top_p: 0.9 repetition_penalty: 1.0 language: - en license: mit tags: - "text2text generation" task: name: "poem interpretation" type: "text2text generation" widget: - text: "Explain: \nThe Lost Boy\n\nBoy it really stinks in here\nThe dumpster is not the place\nTo get the food you need each day\nJust to feed your face.\n\nA ten-year-old with a dirty face\nCrawls out with his daily meal\nWhat is he doing in this place\nHow am I suppose to feel?\n\nHis mother cradles a baby \nThe child's been dead three weeks\nHer mind is gone from drug abuse\nAnd now she hardly speaks.\n\nGrandma is a drunkard\nWith men who come to town\nBringing her a bottle\nJust to go a round.\n\nDrugs out on the table \nA line or two is good\nThat should carry her over \nNo one ever understood.\n\nThe little boy with dirty face\nHas not been schooled in years\nHe fights the streets alone\nLong since lost his fears.\n\nA stale sandwich, and watered coke\nHis meal for this day\nWhatever tomorrow may bring\nHe knows not the word play.\n\nEmaciated with distant eyes\nNo one really sees him\nJust one of the lost boys\nHis life completely grim.\n\nGod bless the children!\n\n" example_title: "The Lost Boy - pattyann4500 (allpoetry.com/920731)" - text: "Explain: \nLet your breath be the air I need,\nwhen I drown in your eyes as I see.\nLet yourself fall into my arms that bleed,\nwhen the world shows you no mercy.\n\nLet your sad past bury down in the core,\nwhen you walk with your heart close to me.\nLet there be your lovely face at the door,\nWhen I return from the war no matter how long it be.\n\nLet your love nourish my frozen heart,\nwhen it lies under the snow capped tree.\nLet me be enslaved with you forever from the start,\nwhen the time comes, together we shall flee.\n\nLet your presence enlighten my dark,\nwhen your smile reflects in the sea.\nLet the words of love be thy spark,\nwhen you come out of dreams to kiss me.\n\nI wish we were together... my princess... \n" example_title: "Princess... - Soulhealer95 (allpoetry.com/11038949)" --- # Overview The aim of this pilot hub is to test whether a Flan-T5-Base model, when pre-trained with a lyrics interpretation task, can better interpret poems. To use the hub, simply paste in any poem of interest and see its meaning. Please begin your request with the prompt, 'Explain: '.
CAMeL-Lab/bert-base-arabic-camelbert-da
[ "pytorch", "tf", "jax", "bert", "fill-mask", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
449
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 257.28 +/- 16.62 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
CAMeL-Lab/bert-base-arabic-camelbert-mix-did-madar-corpus6
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
34
null
--- datasets: - logo-wizard/modern-logo-dataset tags: - text-to-image - lora - stable-diffusion pipeline_tag: text-to-image license: creativeml-openrail-m --- # LoRA text2image fine-tuning - eewwann/logo-diffusion-lora-v10 These are LoRA with Hadamard Product (LoHa) adaption weights for [stabilityai/stable-diffusion-2-1](https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/v2-1_768-nonema-pruned.safetensors). The weights were fine-tuned on the [logo-wizard/modern-logo-dataset](https://huggingface.co/datasets/logo-wizard/modern-logo-dataset) dataset. You can find some example images in the following. ![img_0](./image_0.jpg) ![img_1](./image_1.jpg) ![img_2](./image_2.jpg) ![img_3](./image_3.jpg)
CAMeL-Lab/bert-base-arabic-camelbert-mix-poetry
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:1905.05700", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-Taxi-v3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.50 +/- 2.76 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage ```python model = load_from_hub(repo_id="yasndr/q-Taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
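The snippet above relies on a load_from_hub helper that the card does not define (it comes from the Deep RL course notebooks); a hedged reconstruction built on hf_hub_download and pickle is sketched below.

```python
import pickle

import gym  # newer course versions use `import gymnasium as gym`
from huggingface_hub import hf_hub_download


def load_from_hub(repo_id: str, filename: str) -> dict:
    """Download a pickled Q-learning artifact (Q-table, env id, hyperparameters) and load it."""
    local_path = hf_hub_download(repo_id=repo_id, filename=filename)
    with open(local_path, "rb") as f:
        return pickle.load(f)


model = load_from_hub(repo_id="yasndr/q-Taxi-v3", filename="q-learning.pkl")
env = gym.make(model["env_id"])
```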
CAMeL-Lab/bert-base-arabic-camelbert-mix-pos-msa
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,862
null
## Psychology-Alpaca-RM - PEFT adapter layers for a reward model based on ``decapoda-research/llama-7b-hf``. - Trained on a small subset (110 data points) of ``samhog/cgpt-pairs``, a 10K-prompt dataset in which each prompt has two answers (one 'good', one 'bad').
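A hedged sketch of attaching the adapter to the base checkpoint named above; the adapter repo id is a placeholder, and whether the reward head lives in the adapter or in a separate classification head is not stated in the card.

```python
from transformers import LlamaForCausalLM, LlamaTokenizer
from peft import PeftModel

BASE = "decapoda-research/llama-7b-hf"          # base checkpoint named in the card
ADAPTER = "your-username/psychology-alpaca-rm"  # placeholder adapter repo id

tokenizer = LlamaTokenizer.from_pretrained(BASE)
base_model = LlamaForCausalLM.from_pretrained(BASE)
model = PeftModel.from_pretrained(base_model, ADAPTER)  # attach the PEFT adapter layers
```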
CAMeL-Lab/bert-base-arabic-camelbert-mix
[ "pytorch", "tf", "jax", "bert", "fill-mask", "ar", "arxiv:2103.06678", "transformers", "Arabic", "Dialect", "Egyptian", "Gulf", "Levantine", "Classical Arabic", "MSA", "Modern Standard Arabic", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
20,880
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 259.59 +/- 19.34 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
CAMeL-Lab/bert-base-arabic-camelbert-msa-poetry
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:1905.05700", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
25
null
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1080557016463147008/sPN7F0Dd_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Scratch Team</div> <div style="text-align: center; font-size: 14px;">@scratch</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Scratch Team. | Data | Scratch Team | | --- | --- | | Tweets downloaded | 3161 | | Retweets | 2028 | | Short tweets | 4 | | Tweets kept | 1129 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/qnkb8q9j/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @scratch's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/1yt6szut) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/1yt6szut/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/scratch') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
CAMeL-Lab/bert-base-arabic-camelbert-msa-sixteenth
[ "pytorch", "tf", "jax", "bert", "fill-mask", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26
null
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1627766675620745235/CgPEg0Tc_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Chris Uri</div> <div style="text-align: center; font-size: 14px;">@redcloudnimbus</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Chris Uri. | Data | Chris Uri | | --- | --- | | Tweets downloaded | 1359 | | Retweets | 208 | | Short tweets | 199 | | Tweets kept | 952 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/p68z097t/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @redcloudnimbus's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/s8pwy6bb) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/s8pwy6bb/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/redcloudnimbus') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
CAUKiel/JavaBERT-uncased
[ "pytorch", "safetensors", "bert", "fill-mask", "java", "code", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: expert-freelaw results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # expert-freelaw This model is a fine-tuned version of [EleutherAI/pythia-1b-deduped](https://huggingface.co/EleutherAI/pythia-1b-deduped) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.0413 - Accuracy: 0.5643 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 1 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - num_devices: 8 - gradient_accumulation_steps: 8 - total_train_batch_size: 64 - total_eval_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 1000 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 2.0772 | 0.01 | 200 | 2.0728 | 0.5588 | | 2.0718 | 0.01 | 400 | 2.0656 | 0.5600 | | 2.0661 | 0.02 | 600 | 2.0561 | 0.5617 | | 2.0606 | 0.03 | 800 | 2.0472 | 0.5632 | | 2.0514 | 0.04 | 1000 | 2.0413 | 0.5643 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu117 - Datasets 2.11.0 - Tokenizers 0.13.3
CAUKiel/JavaBERT
[ "pytorch", "safetensors", "bert", "fill-mask", "code", "arxiv:2110.10404", "arxiv:1910.09700", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
388
null
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python model = load_from_hub(repo_id="JacksonBurton/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
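`load_from_hub` and `gym` are not imported in the snippet above; the helper comes from the Hugging Face Deep RL course notebooks rather than from a published package. A self-contained sketch, assuming the pickled dictionary stores the environment id under `env_id` and the Q-table under `qtable` (both assumptions), might be:

```python
import pickle

import gymnasium as gym
import numpy as np
from huggingface_hub import hf_hub_download


def load_from_hub(repo_id: str, filename: str) -> dict:
    """Download the pickled Q-learning model from the Hub and unpickle it."""
    path = hf_hub_download(repo_id=repo_id, filename=filename)
    with open(path, "rb") as f:
        return pickle.load(f)


model = load_from_hub(
    repo_id="JacksonBurton/q-FrozenLake-v1-4x4-noSlippery",
    filename="q-learning.pkl",
)

# The 4x4 no-slippery variant needs is_slippery=False, as the card notes
env = gym.make(model["env_id"], is_slippery=False)

# Greedy rollout: always take the action with the highest Q-value
state, _ = env.reset()
done = False
total_reward = 0.0
while not done:
    action = int(np.argmax(model["qtable"][state]))
    state, reward, terminated, truncated, _ = env.step(action)
    total_reward += reward
    done = terminated or truncated
print("episode return:", total_reward)
```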
Cameron/BERT-eec-emotion
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
36
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - matthews_correlation model-index: - name: bert-base-uncased-finetuned-cola results: - task: name: Text Classification type: text-classification dataset: name: glue type: glue config: cola split: validation args: cola metrics: - name: Matthews Correlation type: matthews_correlation value: 0.5365007161029405 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased-finetuned-cola This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the glue dataset. It achieves the following results on the evaluation set: - Loss: 0.4711 - Matthews Correlation: 0.5365 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 9.678498850368218e-06 - train_batch_size: 32 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Matthews Correlation | |:-------------:|:-----:|:----:|:---------------:|:--------------------:| | No log | 1.0 | 268 | 0.4731 | 0.4664 | | 0.4819 | 2.0 | 536 | 0.4537 | 0.5233 | | 0.4819 | 3.0 | 804 | 0.4711 | 0.5365 | ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - Tokenizers 0.13.3
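The card lists only training details; a minimal inference sketch with the text-classification pipeline could look like this. The repo id is a placeholder, and the raw LABEL_0/LABEL_1 names depend on how the checkpoint's config maps CoLA's unacceptable/acceptable classes.

```python
from transformers import pipeline

# Placeholder repo id: substitute the actual Hub path of this fine-tuned checkpoint
classifier = pipeline("text-classification", model="your-org/bert-base-uncased-finetuned-cola")

# CoLA is a binary acceptability task, so each sentence gets one label with a score
sentences = [
    "The cat sat on the mat.",
    "The sat cat mat on the.",
]
for result in classifier(sentences):
    print(result)  # e.g. {'label': 'LABEL_1', 'score': 0.97}; label mapping is checkpoint-specific
```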
Cameron/BERT-mdgender-wizard
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
--- library_name: "transformers.js" --- https://huggingface.co/openai/whisper-small.en with ONNX weights to be compatible with Transformers.js. Note: Having a separate repo for ONNX weights is intended to be a temporary solution until WebML gains more traction. If you would like to make your models web-ready, we recommend converting to ONNX using [🤗 Optimum](https://huggingface.co/docs/optimum/index) and structuring your repo like this one (with ONNX weights located in a subfolder named `onnx`).
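For reference, the Optimum conversion the card recommends can be driven from Python as well as the CLI; a sketch for this Whisper checkpoint, assuming the standard `export=True` path and an `onnx/` subfolder to match the repo layout described above, might be:

```python
from optimum.onnxruntime import ORTModelForSpeechSeq2Seq
from transformers import AutoProcessor

model_id = "openai/whisper-small.en"

# export=True converts the PyTorch weights to ONNX on the fly
model = ORTModelForSpeechSeq2Seq.from_pretrained(model_id, export=True)
processor = AutoProcessor.from_pretrained(model_id)

# Save into an onnx/ subfolder, matching the layout this card describes
model.save_pretrained("whisper-small.en/onnx")
processor.save_pretrained("whisper-small.en")
```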
Canadiancaleb/DialoGPT-small-jesse
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- license: apache-2.0 tags: - summarization - generated_from_trainer metrics: - rouge model-index: - name: mt5-small-finetuned-amazon-en-es results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mt5-small-finetuned-amazon-en-es This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on the None dataset. It achieves the following results on the evaluation set: - Loss: 3.0132 - Rouge1: 16.4719 - Rouge2: 7.9366 - Rougel: 16.2123 - Rougelsum: 16.2853 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5.6e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | |:-------------:|:-----:|:----:|:---------------:|:-------:|:------:|:-------:|:---------:| | 3.9249 | 1.0 | 1209 | 3.1904 | 15.8207 | 8.0555 | 15.4584 | 15.648 | | 3.5688 | 2.0 | 2418 | 3.0812 | 16.3271 | 8.1479 | 15.9001 | 16.0134 | | 3.3905 | 3.0 | 3627 | 3.0442 | 15.9864 | 7.295 | 15.4247 | 15.5848 | | 3.2728 | 4.0 | 4836 | 3.0304 | 16.2893 | 7.5851 | 15.9494 | 16.0117 | | 3.1958 | 5.0 | 6045 | 3.0169 | 15.4888 | 7.4495 | 15.2244 | 15.2326 | | 3.1359 | 6.0 | 7254 | 3.0158 | 16.3866 | 8.2218 | 16.0625 | 16.0953 | | 3.1059 | 7.0 | 8463 | 3.0075 | 15.9134 | 7.8387 | 15.626 | 15.6499 | | 3.0852 | 8.0 | 9672 | 3.0132 | 16.4719 | 7.9366 | 16.2123 | 16.2853 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1+cu117 - Datasets 2.12.0 - Tokenizers 0.13.2
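As with the other auto-generated cards, no usage example is included; a minimal summarization sketch (placeholder repo id, illustrative generation lengths) could be:

```python
from transformers import pipeline

# Placeholder repo id: substitute the actual Hub path of the fine-tuned checkpoint
summarizer = pipeline("summarization", model="your-org/mt5-small-finetuned-amazon-en-es")

review = (
    "Compré este libro para mi hija y le encantó. "
    "Las ilustraciones son preciosas y la historia es muy entretenida."
)
print(summarizer(review, max_length=30, min_length=5)[0]["summary_text"])
```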
Canadiancaleb/jessebot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: openrail tags: - scat - lora - stable diffusion --- Here's the defecation LoRA; it was available on Civitai until the ban on scat content. You can use various trigger words to get different effects, like "Scat", "Disposal", "Feces" and so on. The main problem with this model is that it tends to confuse the anus and the vagina, so you'll have to add prompts and negatives to help reduce this effect. You can find my other models on Civitai: https://civitai.com/user/JollyIm/models A first example: ![00257-695011179.png](https://s3.amazonaws.com/moonup/production/uploads/64133dd16cd62eb3ba1f8a60/NwGgNExRAgOg1UTVOU9OB.png) Prompts: Realistic, Realism, (Masterpiece, Best Quality, High Quality, Highres:1.4), Detailed, Extremely Detailed, Ambient Soft Lighting, 4K, (Extremely Detailed Eyes, Detailed Face and Skin:1.2), masterpiece, best quality, 1girl, feces, disposal, (anal:1.2), <lora:defecation_v1:0.7>, (public toilet), embarassed, (pile of feces), (perfect pussy), (perfect vagina), Negative prompt: easynegative, (worst quality:1.2), (low quality:1.2), (vaginal), (dirty vagina:1.2), (feces in vagina:1.2), (feces in vagina:1.2) Second example: ![download.png](https://s3.amazonaws.com/moonup/production/uploads/64133dd16cd62eb3ba1f8a60/Zp8ZGoXJTL52_mtvMEXS2.png) Prompts: masterpiece, best quality, 1girl, scat, (anal:1.2), <lora:defecation_v1:0.9>, (toilet), from behind, Negative prompt: easynegative, (worst quality:1.2), (low quality:1.2), (vaginal), (dirty vagina:1.2), (scat in vagina:1.2), (feces in vagina:1.2)
CapitainData/wav2vec2-large-xlsr-turkish-demo-colab
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: "transformers.js" --- https://huggingface.co/facebook/nllb-200-distilled-600M with ONNX weights to be compatible with Transformers.js. Note: Having a separate repo for ONNX weights is intended to be a temporary solution until WebML gains more traction. If you would like to make your models web-ready, we recommend converting to ONNX using [🤗 Optimum](https://huggingface.co/docs/optimum/index) and structuring your repo like this one (with ONNX weights located in a subfolder named `onnx`).
Capreolus/birch-bert-large-msmarco_mb
[ "pytorch", "tf", "jax", "bert", "next-sentence-prediction", "transformers" ]
null
{ "architectures": [ "BertForNextSentencePrediction" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- language: - he metrics: - accuracy library_name: transformers pipeline_tag: text-classification tags: - legal ---
Capreolus/electra-base-msmarco
[ "pytorch", "tf", "electra", "text-classification", "arxiv:2008.09093", "transformers" ]
text-classification
{ "architectures": [ "ElectraForSequenceClassification" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
110
null
--- library_name: "transformers.js" --- https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english with ONNX weights to be compatible with Transformers.js. Note: Having a separate repo for ONNX weights is intended to be a temporary solution until WebML gains more traction. If you would like to make your models web-ready, we recommend converting to ONNX using [🤗 Optimum](https://huggingface.co/docs/optimum/index) and structuring your repo like this one (with ONNX weights located in a subfolder named `onnx`).
Carlork314/Xd
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: "transformers.js" --- https://huggingface.co/distilbert-base-cased-distilled-squad with ONNX weights to be compatible with Transformers.js. Note: Having a separate repo for ONNX weights is intended to be a temporary solution until WebML gains more traction. If you would like to make your models web-ready, we recommend converting to ONNX using [🤗 Optimum](https://huggingface.co/docs/optimum/index) and structuring your repo like this one (with ONNX weights located in a subfolder named `onnx`).
CarlosPR/mt5-spanish-memmories-analysis
[ "pytorch", "mt5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MT5ForConditionalGeneration" ], "model_type": "mt5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- library_name: "transformers.js" --- https://huggingface.co/bert-base-uncased with ONNX weights to be compatible with Transformers.js. Note: Having a separate repo for ONNX weights is intended to be a temporary solution until WebML gains more traction. If you would like to make your models web-ready, we recommend converting to ONNX using [🤗 Optimum](https://huggingface.co/docs/optimum/index) and structuring your repo like this one (with ONNX weights located in a subfolder named `onnx`).
Carolhuehuehuehue/Sla
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: "transformers.js" --- https://huggingface.co/sshleifer/distilbart-cnn-6-6 with ONNX weights to be compatible with Transformers.js. Note: Having a separate repo for ONNX weights is intended to be a temporary solution until WebML gains more traction. If you would like to make your models web-ready, we recommend converting to ONNX using [🤗 Optimum](https://huggingface.co/docs/optimum/index) and structuring your repo like this one (with ONNX weights located in a subfolder named `onnx`).
Cedille/fr-boris
[ "pytorch", "gptj", "text-generation", "fr", "dataset:c4", "arxiv:2202.03371", "transformers", "causal-lm", "license:mit", "has_space" ]
text-generation
{ "architectures": [ "GPTJForCausalLM" ], "model_type": "gptj", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
401
null
--- library_name: "transformers.js" --- https://huggingface.co/google/flan-t5-small with ONNX weights to be compatible with Transformers.js. Note: Having a separate repo for ONNX weights is intended to be a temporary solution until WebML gains more traction. If you would like to make your models web-ready, we recommend converting to ONNX using [🤗 Optimum](https://huggingface.co/docs/optimum/index) and structuring your repo like this one (with ONNX weights located in a subfolder named `onnx`).