observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \n\n```\n\nNote: you can add the `theme` attribute to the `` tag to force the theme to be dark or light (by default, it respects the system theme). E.g.\n\n```html\n\n...\n\n```\n\n### 3. Write your Gradio app inside of the tags\n\nNow, write your Gradio app as you would normally, in Python! Keep in mind that since this is Python, whitespace and indentations matter. \n\n```html\n\n\t\n\t\t\n\t\t\n\t\n\t\n\t\t\n\t\timport gradio as gr\n\n\t\tdef greet(name):\n\t\t\treturn \"Hello, \" + name + \"!\"\n\t\t\n\t\tgr.Interface(greet, \"textbox\", \"textbox\").launch()\n\t\t\n\t\n\n```\n\nAnd that's it! You should now be able to open your HTML page in the browser and see the Gradio app rendered! Note that it may take a little while for the Gradio app to load initially since Pyodide can take a while to install in your browser.\n\n**Note on debugging**: to see any errors in your Gradio-lite application, open the inspector in your web browser. All errors (including Python errors) will be printed there.\n\n## More Examples: Adding Additional Files and Requirements\n\nWhat if you want to create a Gradio app that spans multiple files? Or that has custom Python requirements? Both are possible with `@gradio/lite`!\n\n### Multiple Files\n\nAdding multiple files within a `@gradio/lite` app is very straightforward: use the `` tag. You can have as many `` tags as you want, but each one needs to have a `name` attribute and the entry point to your Gradio app should have the `entrypoint` attribute.\n\nHere's an example:\n\n```html\n\n\n\nimport gradio as gr\nfrom utils import add\n\ndemo = gr.Interface(fn=add, inputs=[\"number\", \"number\"], outputs=\"number\")\n\ndemo.launch()\n\n\n\ndef add(a, b):\n\treturn a + b\n\n\n\t\t\n\n```\n\n### Additional Requirements\n\nIf your Gradio app has additional requirements, it is usually possible to [install them in the browser using micropip](https://pyodide.org/en/stable/usage/loading-packages.html#loading-packages). We've created a wrapper to make this paticularly convenient: simply list your requirements in the same syntax as a `requirements.txt` and enclose them with `` tags.\n\nHere, we install `transformers_js_py` to run a text classification model directly in the browser!\n\n```html\n\n\n\ntransformers_js_py\n\n\n\nfrom transformers_js import import_transformers_js\nimport gradio as gr\n\ntransformers = await import_transformers_js()\npipeline = transformers.pipeline\npipe = await pipeline('sentiment-analysis')\n\nasync def classify(text):\n\treturn await pipe(text)\n\ndemo = gr.Interface(classify, \"textbox\", \"json\")\ndemo.launch()\n\n\n\t\n\n```\n\n**Try it out**: You can see this example running in [this Hugging Face Static Space](https://huggingface.co/spaces/abidlabs/gradio-lite-classify), which lets you host static (serverless) web applications for free. Visit the page and you'll be able to run a machine learning model without internet access!\n\n## Benefits of Using `@gradio/lite`\n\n### 1. Serverless Deployment\nThe primary advantage of @gradio/lite is that it eliminates the need for server infrastructure. This simplifies deployment, reduces server-related costs, and makes it easier to share your Gradio applications with others.\n\n### 2. Low Latency\nBy running in the browser, @gradio/lite offers low-latency interactions for users. 
There's no need for data to travel to and from a server, resulting in faster responses and a smoother user experience.\n\n### 3. Privacy and Security\nSince all processing occurs within the user's browser, `@gradio/lite` enhances privacy and security. User data remains on their device, providing peace of mind regarding data handling.\n\n### Limitations\n\n* Currently, the biggest limitation in using `@gradio/lite` is that your Gradio apps will generally take more time (usually 5-15 seconds) to load initially in the browser. This is because the browser needs to load the Pyodide runtime before it can render Python code. \n\n* Not every Python package is supported by Pyodide. While `gradio` and many other popular packages (including `numpy`, `scikit-learn`, and `transformers-js`) can be installed in Pyodide, if your app has many dependencies, its worth checking whether the dependencies are included in Pyodide, or can be [installed with `micropip`](https://micropip.pyodide.org/en/v0.2.2/project/api.html#micropip.install).\n\n## Try it out!\n\nYou can immediately try out `@gradio/lite` by copying and pasting this code in a local `index.html` file and opening it with your browser:\n\n```html\n\n\t\n\t\t\n\t\t\n\t\n\t\n\t\t\n\t\timport gradio as gr\n\n\t\tdef greet(name):\n\t\t\treturn \"Hello, \" + name + \"!\"\n\t\t\n\t\tgr.Interface(greet, \"textbox\", \"textbox\").launch()\n\t\t\n\t\n\n```\n\n\nWe've also created a playground on the Gradio website that allows you to interactively edit code and see the results immediately! \n\nPlayground: https://www.gradio.app/playground\n\n\n"},"source":{"kind":"string","value":"huggingface/blog/blob/main/gradio-lite.md"}}},{"rowIdx":915,"cells":{"text":{"kind":"string","value":" The SnowballTarget Environment\n\n\"SnowballTarget\"/\n\nSnowballTarget is an environment we created at Hugging Face using assets from [Kay Lousberg](https://kaylousberg.com/). We have an optional section at the end of this Unit **if you want to learn to use Unity and create your environments**.\n\n## The agent's Goal\n\nThe first agent you're going to train is called Julien the bear 🐻. Julien is trained **to hit targets with snowballs**.\n\nThe Goal in this environment is that Julien **hits as many targets as possible in the limited time** (1000 timesteps). It will need **to place itself correctly in relation to the target and shoot**to do that.\n\nIn addition, to avoid \"snowball spamming\" (aka shooting a snowball every timestep), **Julien has a \"cool off\" system** (it needs to wait 0.5 seconds after a shoot to be able to shoot again).\n\n
\n\"Cool\n
The agent needs to wait 0.5s before being able to shoot a snowball again
\n
\n\n## The reward function and the reward engineering problem\n\nThe reward function is simple. **The environment gives a +1 reward every time the agent's snowball hits a target**. Because the agent's Goal is to maximize the expected cumulative reward, **it will try to hit as many targets as possible**.\n\n\"Reward\n\nWe could have a more complex reward function (with a penalty to push the agent to go faster, for example). But when you design an environment, you need to avoid the *reward engineering problem*, which is having a too complex reward function to force your agent to behave as you want it to do.\nWhy? Because by doing that, **you might miss interesting strategies that the agent will find with a simpler reward function**.\n\nIn terms of code, it looks like this:\n\n\"Reward\"/\n\n\n## The observation space\n\nRegarding observations, we don't use normal vision (frame), but **we use raycasts**.\n\nThink of raycasts as lasers that will detect if they pass through an object.\n\n
\n\"Raycasts\"/\n
Source: ML-Agents documentation
\n
\n\n\nIn this environment, our agent has multiple set of raycasts:\n\"Raycasts\"/\n\nIn addition to raycasts, the agent gets a \"can I shoot\" bool as observation.\n\n\"Obs\"/\n\n## The action space\n\nThe action space is discrete:\n\n\"Action\n"},"source":{"kind":"string","value":"huggingface/deep-rl-class/blob/main/units/en/unit5/snowball-target.mdx"}}},{"rowIdx":916,"cells":{"text":{"kind":"string","value":" What are the policy-based methods?\n\nThe main goal of Reinforcement learning is to **find the optimal policy \\\\(\\pi^{*}\\\\) that will maximize the expected cumulative reward**.\nBecause Reinforcement Learning is based on the *reward hypothesis*: **all goals can be described as the maximization of the expected cumulative reward.**\n\nFor instance, in a soccer game (where you're going to train the agents in two units), the goal is to win the game. We can describe this goal in reinforcement learning as\n**maximizing the number of goals scored** (when the ball crosses the goal line) into your opponent's soccer goals. And **minimizing the number of goals in your soccer goals**.\n\n\"Soccer\"\n\n## Value-based, Policy-based, and Actor-critic methods\n\nIn the first unit, we saw two methods to find (or, most of the time, approximate) this optimal policy \\\\(\\pi^{*}\\\\).\n\n- In *value-based methods*, we learn a value function.\n - The idea is that an optimal value function leads to an optimal policy \\\\(\\pi^{*}\\\\).\n - Our objective is to **minimize the loss between the predicted and target value** to approximate the true action-value function.\n - We have a policy, but it's implicit since it **is generated directly from the value function**. For instance, in Q-Learning, we used an (epsilon-)greedy policy.\n\n- On the other hand, in *policy-based methods*, we directly learn to approximate \\\\(\\pi^{*}\\\\) without having to learn a value function.\n - The idea is **to parameterize the policy**. For instance, using a neural network \\\\(\\pi_\\theta\\\\), this policy will output a probability distribution over actions (stochastic policy).\n - \"stochastic\n - Our objective then is **to maximize the performance of the parameterized policy using gradient ascent**.\n - To do that, we control the parameter \\\\(\\theta\\\\) that will affect the distribution of actions over a state.\n\n\"Policy\n\n- Next time, we'll study the *actor-critic* method, which is a combination of value-based and policy-based methods.\n\nConsequently, thanks to policy-based methods, we can directly optimize our policy \\\\(\\pi_\\theta\\\\) to output a probability distribution over actions \\\\(\\pi_\\theta(a|s)\\\\) that leads to the best cumulative return.\nTo do that, we define an objective function \\\\(J(\\theta)\\\\), that is, the expected cumulative reward, and we **want to find the value \\\\(\\theta\\\\) that maximizes this objective function**.\n\n## The difference between policy-based and policy-gradient methods\n\nPolicy-gradient methods, what we're going to study in this unit, is a subclass of policy-based methods. In policy-based methods, the optimization is most of the time *on-policy* since for each update, we only use data (trajectories) collected **by our most recent version of** \\\\(\\pi_\\theta\\\\).\n\nThe difference between these two methods **lies on how we optimize the parameter** \\\\(\\theta\\\\):\n\n- In *policy-based methods*, we search directly for the optimal policy. 
We can optimize the parameter \\\\(\\theta\\\\) **indirectly** by maximizing the local approximation of the objective function with techniques like hill climbing, simulated annealing, or evolution strategies.\n- In *policy-gradient methods*, because it is a subclass of the policy-based methods, we search directly for the optimal policy. But we optimize the parameter \\\\(\\theta\\\\) **directly** by performing the gradient ascent on the performance of the objective function \\\\(J(\\theta)\\\\).\n\nBefore diving more into how policy-gradient methods work (the objective function, policy gradient theorem, gradient ascent, etc.), let's study the advantages and disadvantages of policy-based methods.\n"},"source":{"kind":"string","value":"huggingface/deep-rl-class/blob/main/units/en/unit4/what-are-policy-based-methods.mdx"}}},{"rowIdx":917,"cells":{"text":{"kind":"string","value":"!--Copyright 2022 The HuggingFace Team. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with\nthe License. You may obtain a copy of the License at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\n⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be\nrendered properly in your Markdown viewer.\n\n-->\n\n# LLaMA\n\n## Overview\n\nThe LLaMA model was proposed in [LLaMA: Open and Efficient Foundation Language Models](https://arxiv.org/abs/2302.13971) by Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, Guillaume Lample. It is a collection of foundation language models ranging from 7B to 65B parameters.\n\nThe abstract from the paper is the following:\n\n*We introduce LLaMA, a collection of foundation language models ranging from 7B to 65B parameters. We train our models on trillions of tokens, and show that it is possible to train state-of-the-art models using publicly available datasets exclusively, without resorting to proprietary and inaccessible datasets. In particular, LLaMA-13B outperforms GPT-3 (175B) on most benchmarks, and LLaMA-65B is competitive with the best models, Chinchilla-70B and PaLM-540B. We release all our models to the research community. *\n\nThis model was contributed by [zphang](https://huggingface.co/zphang) with contributions from [BlackSamorez](https://huggingface.co/BlackSamorez). The code of the implementation in Hugging Face is based on GPT-NeoX [here](https://github.com/EleutherAI/gpt-neox). The original code of the authors can be found [here](https://github.com/facebookresearch/llama).\n\n## Usage tips\n\n- Weights for the LLaMA models can be obtained from by filling out [this form](https://docs.google.com/forms/d/e/1FAIpQLSfqNECQnMkycAp2jP4Z9TFX0cGR4uf7b_fBxjY_OjhJILlKGA/viewform?usp=send_form)\n- After downloading the weights, they will need to be converted to the Hugging Face Transformers format using the [conversion script](https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/convert_llama_weights_to_hf.py). 
The script can be called with the following (example) command:\n\n```bash\npython src/transformers/models/llama/convert_llama_weights_to_hf.py \\\n --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path\n```\n\n- After conversion, the model and tokenizer can be loaded via:\n\n```python\nfrom transformers import LlamaForCausalLM, LlamaTokenizer\n\ntokenizer = LlamaTokenizer.from_pretrained(\"/output/path\")\nmodel = LlamaForCausalLM.from_pretrained(\"/output/path\")\n```\n\nNote that executing the script requires enough CPU RAM to host the whole model in float16 precision (even if the biggest versions\ncome in several checkpoints they each contain a part of each weight of the model, so we need to load them all in RAM). For the 65B model, it's thus 130GB of RAM needed.\n\n- The LLaMA tokenizer is a BPE model based on [sentencepiece](https://github.com/google/sentencepiece). One quirk of sentencepiece is that when decoding a sequence, if the first token is the start of the word (e.g. \"Banana\"), the tokenizer does not prepend the prefix space to the string.\n\nThis model was contributed by [zphang](https://huggingface.co/zphang) with contributions from [BlackSamorez](https://huggingface.co/BlackSamorez). The code of the implementation in Hugging Face is based on GPT-NeoX [here](https://github.com/EleutherAI/gpt-neox). The original code of the authors can be found [here](https://github.com/facebookresearch/llama). The Flax version of the implementation was contributed by [afmck](https://huggingface.co/afmck) with the code in the implementation based on Hugging Face's Flax GPT-Neo.\n\n\nBased on the original LLaMA model, Meta AI has released some follow-up works:\n\n- **Llama2**: Llama2 is an improved version of Llama with some architectural tweaks (Grouped Query Attention), and is pre-trained on 2Trillion tokens. Refer to the documentation of Llama2 which can be found [here](llama2).\n\n## Resources\n\nA list of official Hugging Face and community (indicated by 🌎) resources to help you get started with LLaMA. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.\n\n\n\n- A [notebook](https://colab.research.google.com/github/bigscience-workshop/petals/blob/main/examples/prompt-tuning-sst2.ipynb#scrollTo=f04ba4d2) on how to use prompt tuning to adapt the LLaMA model for text classification task. 🌎\n\n\n\n- [StackLLaMA: A hands-on guide to train LLaMA with RLHF](https://huggingface.co/blog/stackllama#stackllama-a-hands-on-guide-to-train-llama-with-rlhf), a blog post about how to train LLaMA to answer questions on [Stack Exchange](https://stackexchange.com/) with RLHF.\n\n⚗️ Optimization\n- A [notebook](https://colab.research.google.com/drive/1SQUXq1AMZPSLD4mk3A3swUIc6Y2dclme?usp=sharing) on how to fine-tune LLaMA model using xturing library on GPU which has limited memory. 🌎 \n\n⚡️ Inference\n- A [notebook](https://colab.research.google.com/github/DominguesM/alpaca-lora-ptbr-7b/blob/main/notebooks/02%20-%20Evaluate.ipynb) on how to run the LLaMA Model using PeftModel from the 🤗 PEFT library. 🌎 \n- A [notebook](https://colab.research.google.com/drive/1l2GiSSPbajVyp2Nk3CFT4t3uH6-5TiBe?usp=sharing) on how to load a PEFT adapter LLaMA model with LangChain. 
🌎\n\n🚀 Deploy\n- A [notebook](https://colab.research.google.com/github/lxe/simple-llama-finetuner/blob/master/Simple_LLaMA_FineTuner.ipynb#scrollTo=3PM_DilAZD8T) on how to fine-tune LLaMA model using LoRA method via the 🤗 PEFT library with intuitive UI. 🌎 \n- A [notebook](https://github.com/aws/amazon-sagemaker-examples/blob/main/introduction_to_amazon_algorithms/jumpstart-foundation-models/text-generation-open-llama.ipynb) on how to deploy Open-LLaMA model for text generation on Amazon SageMaker. 🌎 \n\n## LlamaConfig\n\n[[autodoc]] LlamaConfig\n\n## LlamaTokenizer\n\n[[autodoc]] LlamaTokenizer\n - build_inputs_with_special_tokens\n - get_special_tokens_mask\n - create_token_type_ids_from_sequences\n - save_vocabulary\n\n## LlamaTokenizerFast\n\n[[autodoc]] LlamaTokenizerFast\n - build_inputs_with_special_tokens\n - get_special_tokens_mask\n - create_token_type_ids_from_sequences\n - update_post_processor\n - save_vocabulary\n\n## LlamaModel\n\n[[autodoc]] LlamaModel\n - forward\n\n## LlamaForCausalLM\n\n[[autodoc]] LlamaForCausalLM\n - forward\n\n## LlamaForSequenceClassification\n\n[[autodoc]] LlamaForSequenceClassification\n - forward\n\n## FlaxLlamaModel\n\n[[autodoc]] FlaxLlamaModel\n - __call__\n\n## FlaxLlamaForCausalLM\n\n[[autodoc]] FlaxLlamaForCausalLM\n - __call__\n"},"source":{"kind":"string","value":"huggingface/transformers/blob/main/docs/source/en/model_doc/llama.md"}}},{"rowIdx":918,"cells":{"text":{"kind":"string","value":" Components\n\nWhen building a Tokenizer, you can attach various types of components to\nthis Tokenizer in order to customize its behavior. This page lists most\nprovided components.\n\n## Normalizers\n\nA `Normalizer` is in charge of pre-processing the input string in order\nto normalize it as relevant for a given use case. Some common examples\nof normalization are the Unicode normalization algorithms (NFD, NFKD,\nNFC & NFKC), lowercasing etc... The specificity of `tokenizers` is that\nwe keep track of the alignment while normalizing. This is essential to\nallow mapping from the generated tokens back to the input text.\n\nThe `Normalizer` is optional.\n\n\n\n| Name | Description | Example |\n| :--- | :--- | :--- |\n| NFD | NFD unicode normalization | |\n| NFKD | NFKD unicode normalization | |\n| NFC | NFC unicode normalization | |\n| NFKC | NFKC unicode normalization | |\n| Lowercase | Replaces all uppercase to lowercase | Input: `HELLO ὈΔΥΣΣΕΎΣ`
Output: `hello ὀδυσσεύς` |\n| Strip | Removes all whitespace characters on the specified sides (left, right or both) of the input | Input: `" hi "`
Output: `\"hi\"` |\n| StripAccents | Removes all accent symbols in unicode (to be used with NFD for consistency) | Input: `é`
Output: `e` |\n| Replace | Replaces a custom string or regexp with the given content | `Replace("a", "e")` will behave like this:
Input: `\"banana\"`
Output: `"benene"` |\n| BertNormalizer | Provides an implementation of the Normalizer used in the original BERT. Options that can be set are:
  • clean_text
  • handle_chinese_chars
  • strip_accents
  • lowercase
| |\n| Sequence | Composes multiple normalizers that will run in the provided order | `Sequence([NFKC(), Lowercase()])` |\n
\n\n| Name | Description | Example |\n| :--- | :--- | :--- |\n| NFD | NFD unicode normalization | |\n| NFKD | NFKD unicode normalization | |\n| NFC | NFC unicode normalization | |\n| NFKC | NFKC unicode normalization | |\n| Lowercase | Replaces all uppercase to lowercase | Input: `HELLO ὈΔΥΣΣΕΎΣ`
Output: `hello ὀδυσσεύς` |\n| Strip | Removes all whitespace characters on the specified sides (left, right or both) of the input | Input: `" hi "`
Output: `\"hi\"` |\n| StripAccents | Removes all accent symbols in unicode (to be used with NFD for consistency) | Input: `é`
Output: `e` |\n| Replace | Replaces a custom string or regexp with the given content | `Replace("a", "e")` will behave like this:
Input: `\"banana\"`
Output: `"benene"` |\n| BertNormalizer | Provides an implementation of the Normalizer used in the original BERT. Options that can be set are:
  • clean_text
  • handle_chinese_chars
  • strip_accents
  • lowercase
| |\n| Sequence | Composes multiple normalizers that will run in the provided order | `Sequence::new(vec![NFKC, Lowercase])` |\n
\n\n| Name | Description | Example |\n| :--- | :--- | :--- |\n| NFD | NFD unicode normalization | |\n| NFKD | NFKD unicode normalization | |\n| NFC | NFC unicode normalization | |\n| NFKC | NFKC unicode normalization | |\n| Lowercase | Replaces all uppercase to lowercase | Input: `HELLO ὈΔΥΣΣΕΎΣ`
Output: `hello ὀδυσσεύς` |\n| Strip | Removes all whitespace characters on the specified sides (left, right or both) of the input | Input: `" hi "`
Output: `\"hi\"` |\n| StripAccents | Removes all accent symbols in unicode (to be used with NFD for consistency) | Input: `é`
Output: `e` |\n| Replace | Replaces a custom string or regexp with the given content | `Replace("a", "e")` will behave like this:
Input: `\"banana\"`
Output: `"benene"` |\n| BertNormalizer | Provides an implementation of the Normalizer used in the original BERT. Options that can be set are:
  • cleanText
  • handleChineseChars
  • stripAccents
  • lowercase
| |\n| Sequence | Composes multiple normalizers that will run in the provided order | |\n
\n
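As a quick illustration of how these normalizers are combined in practice, here is a minimal Python sketch using the `tokenizers` package (the input string and the output shown in the comment are only illustrative):

```python
from tokenizers import normalizers
from tokenizers.normalizers import NFD, StripAccents, Lowercase

# Compose several normalizers; they run in the order given
normalizer = normalizers.Sequence([NFD(), StripAccents(), Lowercase()])

# Unicode-decompose, drop the accents, then lowercase
print(normalizer.normalize_str("Héllò HOW are ü?"))
# "hello how are u?"
```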
\n\n## Pre-tokenizers\n\nThe `PreTokenizer` takes care of splitting the input according to a set\nof rules. This pre-processing lets you ensure that the underlying\n`Model` does not build tokens across multiple \"splits\". For example if\nyou don't want to have whitespaces inside a token, then you can have a\n`PreTokenizer` that splits on these whitespaces.\n\nYou can easily combine multiple `PreTokenizer` together using a\n`Sequence` (see below). The `PreTokenizer` is also allowed to modify the\nstring, just like a `Normalizer` does. This is necessary to allow some\ncomplicated algorithms that require to split before normalizing (e.g.\nthe ByteLevel)\n\n\n\n| Name | Description | Example |\n| :--- | :--- | :--- |\n| ByteLevel | Splits on whitespaces while remapping all the bytes to a set of visible characters. This technique as been introduced by OpenAI with GPT-2 and has some more or less nice properties:
  • Since it maps on bytes, a tokenizer using this only requires **256** characters as initial alphabet (the number of values a byte can have), as opposed to the 130,000+ Unicode characters.
  • A consequence of the previous point is that it is absolutely unnecessary to have an unknown token using this since we can represent anything with 256 tokens (Youhou!! 🎉🎉)
  • For non ascii characters, it gets completely unreadable, but it works nonetheless!
| Input: `\"Hello my friend, how are you?\"`
Ouput: `\"Hello\", \"Ġmy\", Ġfriend\", \",\", \"Ġhow\", \"Ġare\", \"Ġyou\", \"?\"` |\n| Whitespace | Splits on word boundaries (using the following regular expression: `\\w+|[^\\w\\s]+` | Input: `\"Hello there!\"`
Output: `\"Hello\", \"there\", \"!\"` |\n| WhitespaceSplit | Splits on any whitespace character | Input: `\"Hello there!\"`
Output: `\"Hello\", \"there!\"` |\n| Punctuation | Will isolate all punctuation characters | Input: `\"Hello?\"`
Ouput: `\"Hello\", \"?\"` |\n| Metaspace | Splits on whitespaces and replaces them with a special char “▁” (U+2581) | Input: `\"Hello there\"`
Ouput: `\"Hello\", \"▁there\"` |\n| CharDelimiterSplit | Splits on a given character | Example with `x`:
Input: `\"Helloxthere\"`
Ouput: `\"Hello\", \"there\"` |\n| Digits | Splits the numbers from any other characters. | Input: `\"Hello123there\"`
Output: ``\"Hello\", \"123\", \"there\"`` |\n| Split | Versatile pre-tokenizer that splits on provided pattern and according to provided behavior. The pattern can be inverted if necessary.
  • pattern should be either a custom string or regexp.
  • behavior should be one of:
    • removed
    • isolated
    • merged_with_previous
    • merged_with_next
    • contiguous
  • invert should be a boolean flag.
| Example with pattern = ` `, behavior = `\"isolated\"`, invert = `False`:
Input: `\"Hello, how are you?\"`
Output: `\"Hello,\", \" \", \"how\", \" \", \"are\", \" \", \"you?\"` |\n| Sequence | Lets you compose multiple `PreTokenizer` that will be run in the given order | `Sequence([Punctuation(), WhitespaceSplit()])` |\n
\n\n| Name | Description | Example |\n| :--- | :--- | :--- |\n| ByteLevel | Splits on whitespaces while remapping all the bytes to a set of visible characters. This technique as been introduced by OpenAI with GPT-2 and has some more or less nice properties:
  • Since it maps on bytes, a tokenizer using this only requires **256** characters as initial alphabet (the number of values a byte can have), as opposed to the 130,000+ Unicode characters.
  • A consequence of the previous point is that it is absolutely unnecessary to have an unknown token using this since we can represent anything with 256 tokens (Youhou!! 🎉🎉)
  • For non ascii characters, it gets completely unreadable, but it works nonetheless!
| Input: `\"Hello my friend, how are you?\"`
Ouput: `\"Hello\", \"Ġmy\", Ġfriend\", \",\", \"Ġhow\", \"Ġare\", \"Ġyou\", \"?\"` |\n| Whitespace | Splits on word boundaries (using the following regular expression: `\\w+|[^\\w\\s]+` | Input: `\"Hello there!\"`
Output: `\"Hello\", \"there\", \"!\"` |\n| WhitespaceSplit | Splits on any whitespace character | Input: `\"Hello there!\"`
Output: `\"Hello\", \"there!\"` |\n| Punctuation | Will isolate all punctuation characters | Input: `\"Hello?\"`
Ouput: `\"Hello\", \"?\"` |\n| Metaspace | Splits on whitespaces and replaces them with a special char “▁” (U+2581) | Input: `\"Hello there\"`
Ouput: `\"Hello\", \"▁there\"` |\n| CharDelimiterSplit | Splits on a given character | Example with `x`:
Input: `\"Helloxthere\"`
Ouput: `\"Hello\", \"there\"` |\n| Digits | Splits the numbers from any other characters. | Input: `\"Hello123there\"`
Output: ``\"Hello\", \"123\", \"there\"`` |\n| Split | Versatile pre-tokenizer that splits on provided pattern and according to provided behavior. The pattern can be inverted if necessary.
  • pattern should be either a custom string or regexp.
  • behavior should be one of:
    • Removed
    • Isolated
    • MergedWithPrevious
    • MergedWithNext
    • Contiguous
  • invert should be a boolean flag.
| Example with pattern = ` `, behavior = `\"isolated\"`, invert = `False`:
Input: `\"Hello, how are you?\"`
Output: `\"Hello,\", \" \", \"how\", \" \", \"are\", \" \", \"you?\"` |\n| Sequence | Lets you compose multiple `PreTokenizer` that will be run in the given order | `Sequence::new(vec![Punctuation, WhitespaceSplit])` |\n
\n\n| Name | Description | Example |\n| :--- | :--- | :--- |\n| ByteLevel | Splits on whitespaces while remapping all the bytes to a set of visible characters. This technique as been introduced by OpenAI with GPT-2 and has some more or less nice properties:
  • Since it maps on bytes, a tokenizer using this only requires **256** characters as initial alphabet (the number of values a byte can have), as opposed to the 130,000+ Unicode characters.
  • A consequence of the previous point is that it is absolutely unnecessary to have an unknown token using this since we can represent anything with 256 tokens (Youhou!! 🎉🎉)
  • For non ascii characters, it gets completely unreadable, but it works nonetheless!
| Input: `\"Hello my friend, how are you?\"`
Ouput: `\"Hello\", \"Ġmy\", Ġfriend\", \",\", \"Ġhow\", \"Ġare\", \"Ġyou\", \"?\"` |\n| Whitespace | Splits on word boundaries (using the following regular expression: `\\w+|[^\\w\\s]+` | Input: `\"Hello there!\"`
Output: `\"Hello\", \"there\", \"!\"` |\n| WhitespaceSplit | Splits on any whitespace character | Input: `\"Hello there!\"`
Output: `\"Hello\", \"there!\"` |\n| Punctuation | Will isolate all punctuation characters | Input: `\"Hello?\"`
Ouput: `\"Hello\", \"?\"` |\n| Metaspace | Splits on whitespaces and replaces them with a special char “▁” (U+2581) | Input: `\"Hello there\"`
Ouput: `\"Hello\", \"▁there\"` |\n| CharDelimiterSplit | Splits on a given character | Example with `x`:
Input: `\"Helloxthere\"`
Ouput: `\"Hello\", \"there\"` |\n| Digits | Splits the numbers from any other characters. | Input: `\"Hello123there\"`
Output: ``\"Hello\", \"123\", \"there\"`` |\n| Split | Versatile pre-tokenizer that splits on provided pattern and according to provided behavior. The pattern can be inverted if necessary.
  • pattern should be either a custom string or regexp.
  • behavior should be one of:
    • removed
    • isolated
    • mergedWithPrevious
    • mergedWithNext
    • contiguous
  • invert should be a boolean flag.
| Example with pattern = ` `, behavior = `\"isolated\"`, invert = `False`:
Input: `\"Hello, how are you?\"`
Output: `\"Hello,\", \" \", \"how\", \" \", \"are\", \" \", \"you?\"` |\n| Sequence | Lets you compose multiple `PreTokenizer` that will be run in the given order | |\n
\n
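Likewise, a pre-tokenizer can be run on its own to see how it splits a string before the model ever sees it. A minimal sketch (the offsets shown in the comment are indicative):

```python
from tokenizers.pre_tokenizers import Whitespace, Digits, Sequence

# Split on word boundaries first, then isolate runs of digits
pre_tokenizer = Sequence([Whitespace(), Digits(individual_digits=False)])

# Each pre-token is returned with its character offsets in the original string
print(pre_tokenizer.pre_tokenize_str("Hello123 there!"))
# e.g. [('Hello', (0, 5)), ('123', (5, 8)), ('there', (9, 14)), ('!', (14, 15))]
```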
\n\n## Models\n\nModels are the core algorithms used to actually tokenize, and therefore,\nthey are the only mandatory component of a Tokenizer.\n\n| Name | Description |\n| :--- | :--- |\n| WordLevel | This is the “classic” tokenization algorithm. It let’s you simply map words to IDs without anything fancy. This has the advantage of being really simple to use and understand, but it requires extremely large vocabularies for a good coverage. Using this `Model` requires the use of a `PreTokenizer`. No choice will be made by this model directly, it simply maps input tokens to IDs. |\n| BPE | One of the most popular subword tokenization algorithm. The Byte-Pair-Encoding works by starting with characters, while merging those that are the most frequently seen together, thus creating new tokens. It then works iteratively to build new tokens out of the most frequent pairs it sees in a corpus. BPE is able to build words it has never seen by using multiple subword tokens, and thus requires smaller vocabularies, with less chances of having “unk” (unknown) tokens. |\n| WordPiece | This is a subword tokenization algorithm quite similar to BPE, used mainly by Google in models like BERT. It uses a greedy algorithm, that tries to build long words first, splitting in multiple tokens when entire words don’t exist in the vocabulary. This is different from BPE that starts from characters, building bigger tokens as possible. It uses the famous `##` prefix to identify tokens that are part of a word (ie not starting a word). |\n| Unigram | Unigram is also a subword tokenization algorithm, and works by trying to identify the best set of subword tokens to maximize the probability for a given sentence. This is different from BPE in the way that this is not deterministic based on a set of rules applied sequentially. Instead Unigram will be able to compute multiple ways of tokenizing, while choosing the most probable one. |\n\n## Post-Processors\n\nAfter the whole pipeline, we sometimes want to insert some special\ntokens before feed a tokenized string into a model like \"[CLS] My\nhorse is amazing [SEP]\". The `PostProcessor` is the component doing\njust that.\n\n| Name | Description | Example |\n| :--- | :--- | :--- |\n| TemplateProcessing | Let’s you easily template the post processing, adding special tokens, and specifying the `type_id` for each sequence/special token. The template is given two strings representing the single sequence and the pair of sequences, as well as a set of special tokens to use. | Example, when specifying a template with these values:
  • single: `\"[CLS] $A [SEP]\"`
  • pair: `\"[CLS] $A [SEP] $B [SEP]\"`
  • special tokens:
    • `\"[CLS]\"`
    • `\"[SEP]\"`

Input: `(\"I like this\", \"but not this\")`
Output: `\"[CLS] I like this [SEP] but not this [SEP]\"` |\n\n## Decoders\n\nThe Decoder knows how to go from the IDs used by the Tokenizer, back to\na readable piece of text. Some `Normalizer` and `PreTokenizer` use\nspecial characters or identifiers that need to be reverted for example.\n\n| Name | Description |\n| :--- | :--- |\n| ByteLevel | Reverts the ByteLevel PreTokenizer. This PreTokenizer encodes at the byte-level, using a set of visible Unicode characters to represent each byte, so we need a Decoder to revert this process and get something readable again. |\n| Metaspace | Reverts the Metaspace PreTokenizer. This PreTokenizer uses a special identifer `▁` to identify whitespaces, and so this Decoder helps with decoding these. |\n| WordPiece | Reverts the WordPiece Model. This model uses a special identifier `##` for continuing subwords, and so this Decoder helps with decoding these. |\n"},"source":{"kind":"string","value":"huggingface/tokenizers/blob/main/docs/source-doc-builder/components.mdx"}}},{"rowIdx":919,"cells":{"text":{"kind":"string","value":" Notifications\n\nNotifications allow you to know when new activities (Pull Requests or discussions) happen on models, datasets, and Spaces belonging to users or organizations you are watching.\n\nBy default, you'll receive a notification if:\n\n- Someone mentions you in a discussion/PR.\n- A new comment is posted in a discussion/PR you participated in.\n- A new discussion/PR or comment is posted in one of the repositories of an organization or user you are watching.\n\n![Notifications page](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/hub/notifications-page.png)\n\nYou'll get new notifications by email and [directly on the website](https://huggingface.co/notifications), you can change this in your [notifications settings](#notifications-settings).\n\n## Filtering and managing notifications\n\nOn the [notifications page](https://huggingface.co/notifications), you have several options for filtering and managing your notifications more effectively:\n - Filter by Repository: Choose to display notifications from a specific repository only.\n - Filter by Read Status: Display only unread notifications or all notifications.\n - Filter by Participation: Show notifications you have participated in or those which you have been directly mentioned.\n\nAdditionally, you can take the following actions to manage your notifications:\n\n - Mark as Read/Unread: Change the status of notifications to mark them as read or unread.\n - Mark as Done: Once marked as done, notifications will no longer appear in the notification center (they are deleted).\n \nBy default, changes made to notifications will only apply to the selected notifications on the screen. However, you can also apply changes to all matching notifications (like in Gmail for instance) for greater convenience.\n\n
\n\n## Watching users and organizations\n\nBy default, you'll be watching all the organizations you are a member of and will be notified of any new activity on those.\n\nYou can also choose to get notified on arbitrary users or organizations. To do so, use the \"Watch repos\" button on their HF profiles. Note that you can also quickly watch/unwatch users and organizations directly from your [notifications settings](#notifications-settings).\n\n_Unlike Github or similar services, you cannot watch a specific repository. You must watch users/organizations to get notified about any new activity on any of their repositories. The goal is to simplify this functionality for users as much as possible and to make sure you don't miss anything you might be interested in._\n\n## Notifications settings\n\nIn your [notifications settings](https://huggingface.co/settings/notifications) page, you can choose specific channels to get notified on depending on the type of activity, for example, receiving an email for direct mentions but only a web notification for new activity on watched users and organizations. By default, you'll get an email and a web notification for any new activity but feel free to adjust your settings depending on your needs.\n\n_Note that clicking the unsubscribe link in an email will unsubscribe you for the type of activity, eg direct mentions._\n\n![Notifications settings page](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/notifications-settings.png)\n\nYou can quickly add any user/organization to your watch list by searching them by name using the dedicated search bar.\nUnsubscribe from a specific user/organization simply by unticking the corresponding checkbox.\n\n## Mute notifications for a specific repository\n\nIt's possible to mute notifications for a particular repository by using the \"Mute notifications\" action in the repository's contextual menu.\nThis will prevent you from receiving any new notifications for that particular repository. You can unmute the repository at any time by clicking the \"Unmute notifications\" action in the same repository menu.\n\n![mute notification menu](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/hub/notifications-mute-menu.png)\n\n_Note, if a repository is muted, you won't receive any new notification unless you're directly mentioned or participating to a discussion._ \n\nThe list of muted repositories is available from the notifications settings page:\n\n![Notifications settings page muted repositories](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/hub/notifications-settings-muted.png)\n"},"source":{"kind":"string","value":"huggingface/hub-docs/blob/main/docs/hub/notifications.md"}}},{"rowIdx":920,"cells":{"text":{"kind":"string","value":"--\ntitle: \"BERT 101 - State Of The Art NLP Model Explained\"\nthumbnail: /blog/assets/52_bert_101/thumbnail.jpg\nauthors:\n- user: britneymuller\n---\n\n\n\n# BERT 101 🤗 State Of The Art NLP Model Explained\n\n\n\n\n## What is BERT?\n\nBERT, short for Bidirectional Encoder Representations from Transformers, is a Machine Learning (ML) model for natural language processing. It was developed in 2018 by researchers at Google AI Language and serves as a swiss army knife solution to 11+ of the most common language tasks, such as sentiment analysis and named entity recognition. \n\nLanguage has historically been difficult for computers to ‘understand’. 
Sure, computers can collect, store, and read text inputs but they lack basic language _context_.\n\nSo, along came Natural Language Processing (NLP): the field of artificial intelligence aiming for computers to read, analyze, interpret and derive meaning from text and spoken words. This practice combines linguistics, statistics, and Machine Learning to assist computers in ‘understanding’ human language.\n\nIndividual NLP tasks have traditionally been solved by individual models created for each specific task. That is, until— BERT!\n\nBERT revolutionized the NLP space by solving for 11+ of the most common NLP tasks (and better than previous models) making it the jack of all NLP trades. \n\nIn this guide, you'll learn what BERT is, why it’s different, and how to get started using BERT:\n\n1. [What is BERT used for?](#1-what-is-bert-used-for)\n2. [How does BERT work?](#2-how-does-bert-work)\n3. [BERT model size & architecture](#3-bert-model-size--architecture)\n4. [BERT’s performance on common language tasks](#4-berts-performance-on-common-language-tasks)\n5. [Environmental impact of deep learning](#5-enviornmental-impact-of-deep-learning)\n6. [The open source power of BERT](#6-the-open-source-power-of-bert)\n7. [How to get started using BERT](#7-how-to-get-started-using-bert)\n8. [BERT FAQs](#8-bert-faqs)\n9. [Conclusion](#9-conclusion)\n\nLet's get started! 🚀\n\n\n## 1. What is BERT used for?\n\nBERT can be used on a wide variety of language tasks:\n\n- Can determine how positive or negative a movie’s reviews are. [(Sentiment Analysis)](https://huggingface.co/blog/sentiment-analysis-python)\n- Helps chatbots answer your questions. [(Question answering)](https://huggingface.co/tasks/question-answering)\n- Predicts your text when writing an email (Gmail). [(Text prediction)](https://huggingface.co/tasks/fill-mask)\n- Can write an article about any topic with just a few sentence inputs. [(Text generation)](https://huggingface.co/tasks/text-generation)\n- Can quickly summarize long legal contracts. [(Summarization)](https://huggingface.co/tasks/summarization)\n- Can differentiate words that have multiple meanings (like ‘bank’) based on the surrounding text. (Polysemy resolution)\n\n**There are many more language/NLP tasks + more detail behind each of these.**\n\n***Fun Fact:*** You interact with NLP (and likely BERT) almost every single day! \n\nNLP is behind Google Translate, voice assistants (Alexa, Siri, etc.), chatbots, Google searches, voice-operated GPS, and more.\n\n---\n\n### 1.1 Example of BERT\n\n\nBERT helps Google better surface (English) results for nearly all searches since November of 2020. \n\nHere’s an example of how BERT helps Google better understand specific searches like:\n\n
*[Figure: an example Google search, shown before and after BERT]*
\n\n\nPre-BERT Google surfaced information about getting a prescription filled. \n\nPost-BERT Google understands that “for someone” relates to picking up a prescription for someone else and the search results now help to answer that.\n\n---\n\n## 2. How does BERT Work?\n\nBERT works by leveraging the following:\n\n### 2.1 Large amounts of training data\n\nA massive dataset of 3.3 Billion words has contributed to BERT’s continued success. \n\nBERT was specifically trained on Wikipedia (\\~2.5B words) and Google’s BooksCorpus (\\~800M words). These large informational datasets contributed to BERT’s deep knowledge not only of the English language but also of our world! 🚀\n\nTraining on a dataset this large takes a long time. BERT’s training was made possible thanks to the novel Transformer architecture and sped up by using TPUs (Tensor Processing Units - Google’s custom circuit built specifically for large ML models). —64 TPUs trained BERT over the course of 4 days.\n\n**Note:** Demand for smaller BERT models is increasing in order to use BERT within smaller computational environments (like cell phones and personal computers). [23 smaller BERT models were released in March 2020](https://github.com/google-research/bert). [DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert) offers a lighter version of BERT; runs 60% faster while maintaining over 95% of BERT’s performance.\n\n### 2.2 What is a Masked Language Model?\n\nMLM enables/enforces bidirectional learning from text by masking (hiding) a word in a sentence and forcing BERT to bidirectionally use the words on either side of the covered word to predict the masked word. This had never been done before!\n\n**Fun Fact:** We naturally do this as humans! \n\n**Masked Language Model Example:**\n\nImagine your friend calls you while camping in Glacier National Park and their service begins to cut out. The last thing you hear before the call drops is:\n\n
> Friend: “Dang! I’m out fishing and a huge trout just [blank] my line!”
\n\nCan you guess what your friend said?? \n\nYou’re naturally able to predict the missing word by considering the words bidirectionally before and after the missing word as context clues (in addition to your historical knowledge of how fishing works). Did you guess that your friend said, ‘broke’? That’s what we predicted as well but even we humans are error-prone to some of these methods. \n\n**Note:** This is why you’ll often see a “Human Performance” comparison to a language model’s performance scores. And yes, newer models like BERT can be more accurate than humans! 🤯\n\nThe bidirectional methodology you did to fill in the [blank] word above is similar to how BERT attains state-of-the-art accuracy. A random 15% of tokenized words are hidden during training and BERT’s job is to correctly predict the hidden words. Thus, directly teaching the model about the English language (and the words we use). Isn’t that neat?\n\nPlay around with BERT’s masking predictions: \n\n
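One way to do that from Python is the 🤗 Transformers `fill-mask` pipeline. This is a minimal sketch: it assumes `transformers` plus a PyTorch or TensorFlow backend are installed, and `bert-base-uncased` is just one commonly used checkpoint.

```python
from transformers import pipeline

# BERT-style models predict the [MASK] token from both the left and right context
unmasker = pipeline("fill-mask", model="bert-base-uncased")

predictions = unmasker("I'm out fishing and a huge trout just [MASK] my line!")
for p in predictions:
    print(f"{p['token_str']:>10}  score={p['score']:.3f}")
```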
\n\n**Fun Fact:** Masking has been around a long time - [1953 Paper on Cloze procedure (or ‘Masking’)](https://psycnet.apa.org/record/1955-00850-001). \n\n### 2.3 What is Next Sentence Prediction?\n\nNSP (Next Sentence Prediction) is used to help BERT learn about relationships between sentences by predicting if a given sentence follows the previous sentence or not. \n\n**Next Sentence Prediction Example:**\n\n1. Paul went shopping. He bought a new shirt. (correct sentence pair)\n2. Ramona made coffee. Vanilla ice cream cones for sale. (incorrect sentence pair)\n\nIn training, 50% correct sentence pairs are mixed in with 50% random sentence pairs to help BERT increase next sentence prediction accuracy.\n\n**Fun Fact:** BERT is trained on both MLM (50%) and NSP (50%) at the same time.\n\n### 2.4 Transformers\n\nThe Transformer architecture makes it possible to parallelize ML training extremely efficiently. Massive parallelization thus makes it feasible to train BERT on large amounts of data in a relatively short period of time. \n\nTransformers use an attention mechanism to observe relationships between words. A concept originally proposed in the popular [2017 Attention Is All You Need](https://proceedings.neurips.cc/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf) paper sparked the use of Transformers in NLP models all around the world.\n\n\n
> Since their introduction in 2017, Transformers have rapidly become the state-of-the-art approach to tackle tasks in many domains such as natural language processing, speech recognition, and computer vision. In short, if you’re doing deep learning, then you need Transformers!
>
> *Lewis Tunstall, Hugging Face ML Engineer & Author of Natural Language Processing with Transformers*
\n\nTimeline of popular Transformer model releases:\n\n
*[Figure: timeline of popular Transformer model releases]*
\n\n#### 2.4.1 How do Transformers work?\n\nTransformers work by leveraging attention, a powerful deep-learning algorithm, first seen in computer vision models.\n\n—Not all that different from how we humans process information through attention. We are incredibly good at forgetting/ignoring mundane daily inputs that don’t pose a threat or require a response from us. For example, can you remember everything you saw and heard coming home last Tuesday? Of course not! Our brain’s memory is limited and valuable. Our recall is aided by our ability to forget trivial inputs. \n\nSimilarly, Machine Learning models need to learn how to pay attention only to the things that matter and not waste computational resources processing irrelevant information. Transformers create differential weights signaling which words in a sentence are the most critical to further process.\n\n
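To make the idea of differential weights a bit more concrete, here is a toy sketch of scaled dot-product attention. It is purely illustrative: real Transformers use learned query/key/value projections, multiple heads, and much larger dimensions.

```python
import numpy as np

def scaled_dot_product_attention(Q, K, V):
    # Similarity between each query and every key
    scores = Q @ K.T / np.sqrt(K.shape[-1])
    # Softmax turns the scores into attention weights that sum to 1 for each token
    weights = np.exp(scores - scores.max(axis=-1, keepdims=True))
    weights = weights / weights.sum(axis=-1, keepdims=True)
    # Each output vector is a weighted mix of the value vectors
    return weights @ V, weights

rng = np.random.default_rng(0)
x = rng.normal(size=(3, 4))  # 3 tokens with 4-dimensional embeddings
output, attention = scaled_dot_product_attention(x, x, x)
print(attention.round(2))    # row i shows how much token i attends to each token
```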
\n\nA transformer does this by successively processing an input through a stack of transformer layers, usually called the encoder. If necessary, another stack of transformer layers - the decoder - can be used to predict a target output. —BERT however, doesn’t use a decoder. Transformers are uniquely suited for unsupervised learning because they can efficiently process millions of data points.\n\nFun Fact: Google has been using your reCAPTCHA selections to label training data since 2011. The entire Google Books archive and 13 million articles from the New York Times catalog have been transcribed/digitized via people entering reCAPTCHA text. Now, reCAPTCHA is asking us to label Google Street View images, vehicles, stoplights, airplanes, etc. Would be neat if Google made us aware of our participation in this effort (as the training data likely has future commercial intent) but I digress..\n\n
> To learn more about Transformers check out our Hugging Face Transformers Course.
\n\n## 3. BERT model size & architecture\n\nLet’s break down the architecture for the two original BERT models:\n\n
*[Figure: BERTbase vs. BERTlarge architecture]*
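The tables below list the exact numbers. If you ever want to sanity-check the parameter counts yourself, a hedged way is to load the checkpoints and count parameters; this assumes the `transformers` library with a PyTorch backend and uses the `bert-base-uncased` and `bert-large-uncased` checkpoints (downloaded on first run).

```python
from transformers import AutoModel

for name in ("bert-base-uncased", "bert-large-uncased"):
    model = AutoModel.from_pretrained(name)
    total = sum(p.numel() for p in model.parameters())
    print(f"{name}: ~{total / 1e6:.0f}M parameters")
```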
\n\n\nML Architecture Glossary:\n\n| ML Architecture Parts | Definition |\n|-----------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| Parameters: | Number of learnable variables/values available for the model. |\n| Transformer Layers: | Number of Transformer blocks. A transformer block transforms a sequence of word representations to a sequence of contextualized words (numbered representations). |\n| Hidden Size: | Layers of mathematical functions, located between the input and output, that assign weights (to words) to produce a desired result. |\n| Attention Heads: | The size of a Transformer block. |\n| Processing: | Type of processing unit used to train the model. |\n| Length of Training: | Time it took to train the model. \n\n\nHere’s how many of the above ML architecture parts BERTbase and BERTlarge has:\n\n\n| | Transformer Layers | Hidden Size | Attention Heads | Parameters | Processing | Length of Training |\n|-----------|--------------------|-------------|-----------------|------------|------------|--------------------|\n| BERTbase | 12 | 768 | 12 | 110M | 4 TPUs | 4 days |\n| BERTlarge | 24 | 1024 | 16 | 340M | 16 TPUs | 4 days |\n\n\n\nLet’s take a look at how BERTlarge’s additional layers, attention heads, and parameters have increased its performance across NLP tasks.\n\n## 4. BERT's performance on common language tasks\n\nBERT has successfully achieved state-of-the-art accuracy on 11 common NLP tasks, outperforming previous top NLP models, and is the first to outperform humans! \nBut, how are these achievements measured?\n\n### NLP Evaluation Methods: \n\n#### 4.1 SQuAD v1.1 & v2.0\n[SQuAD](https://huggingface.co/datasets/squad) (Stanford Question Answering Dataset) is a reading comprehension dataset of around 108k questions that can be answered via a corresponding paragraph of Wikipedia text. BERT’s performance on this evaluation method was a big achievement beating previous state-of-the-art models and human-level performance:\n\n
\n \n
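To get a feel for the SQuAD task itself, here is a short sketch using the 🤗 Transformers `pipeline` (the checkpoint shown is a distilled, BERT-style model fine-tuned on SQuAD, not the exact model behind the leaderboard numbers above):

```python
from transformers import pipeline

# Extractive question answering in the SQuAD style:
# the model picks the answer span out of the context paragraph.
qa = pipeline("question-answering", model="distilbert-base-cased-distilled-squad")

result = qa(
    question="What does SQuAD stand for?",
    context="SQuAD, the Stanford Question Answering Dataset, is a reading comprehension "
            "dataset consisting of questions posed on a set of Wikipedia articles.",
)
print(result)  # expected: a dict with 'answer', 'score', and the span's 'start'/'end' indices
```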
#### 4.2 SWAG
[SWAG](https://huggingface.co/datasets/swag) (Situations With Adversarial Generations) is an interesting evaluation in that it tests a model's ability to infer common sense! It does this through a large-scale dataset of 113k multiple-choice questions about everyday situations. Each question describes a scene from a video, and SWAG provides the model with four possible outcomes for what happens next. The model then does its best to predict the correct answer.

BERT outperformed the previous top models as well as human-level performance:
\n \n
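If you want to look at the SWAG data yourself, it is hosted on the Hub; here is a quick sketch (the `regular` configuration name and field names follow the dataset card, so double-check them against your version of the dataset):

```python
from datasets import load_dataset

# Load the validation split of SWAG and inspect one example.
swag = load_dataset("swag", "regular", split="validation")

example = swag[0]
print(example["startphrase"])                      # the situation the model must continue
print([example[f"ending{i}"] for i in range(4)])   # the four candidate continuations
print(example["label"])                            # index of the correct continuation
```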
\n\n#### 4.3 GLUE Benchmark\n[GLUE](https://huggingface.co/datasets/glue) (General Language Understanding Evaluation) benchmark is a group of resources for training, measuring, and analyzing language models comparatively to one another. These resources consist of nine “difficult” tasks designed to test an NLP model’s understanding. Here’s a summary of each of those tasks:\n\n
\n \n
\n\n
\n \n
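The GLUE tasks are also hosted on the Hub, so you can inspect them directly; here is a quick sketch for MRPC, the paraphrase-detection task:

```python
from datasets import load_dataset

# Each GLUE task is its own configuration; MRPC asks whether two sentences are paraphrases.
mrpc = load_dataset("glue", "mrpc")

print(mrpc)              # train / validation / test splits
print(mrpc["train"][0])  # {'sentence1': ..., 'sentence2': ..., 'label': ..., 'idx': ...}
```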
While some of these tasks may seem irrelevant or banal, it's important to note that these evaluation methods are _incredibly_ powerful in indicating which models are best suited for your next NLP application.

Attaining performance of this caliber isn't without consequences. Next up, let's learn about Machine Learning's impact on the environment.


## 5. Environmental impact of deep learning

Large Machine Learning models require massive amounts of data, which is expensive in both time and compute resources.

These models also have an environmental impact:
\n \n
\n
\n\nMachine Learning’s environmental impact is one of the many reasons we believe in democratizing the world of Machine Learning through open source! Sharing large pre-trained language models is essential in reducing the overall compute cost and carbon footprint of our community-driven efforts.\n\n\n## 6. The open source power of BERT\n\nUnlike other large learning models like GPT-3, BERT’s source code is publicly accessible ([view BERT’s code on Github](https://github.com/google-research/bert)) allowing BERT to be more widely used all around the world. This is a game-changer!\n\nDevelopers are now able to get a state-of-the-art model like BERT up and running quickly without spending large amounts of time and money. 🤯 \n\nDevelopers can instead focus their efforts on fine-tuning BERT to customize the model’s performance to their unique tasks. \n\nIt’s important to note that [thousands](https://huggingface.co/models?sort=downloads&search=bert) of open-source and free, pre-trained BERT models are currently available for specific use cases if you don’t want to fine-tune BERT. \n\nBERT models pre-trained for specific tasks:\n\n- [Twitter sentiment analysis](https://huggingface.co/finiteautomata/bertweet-base-sentiment-analysis)\n- [Analysis of Japanese text](https://huggingface.co/cl-tohoku/bert-base-japanese-char)\n- [Emotion categorizer (English - anger, fear, joy, etc.)](https://huggingface.co/j-hartmann/emotion-english-distilroberta-base)\n- [Clinical Notes analysis](https://huggingface.co/emilyalsentzer/Bio_ClinicalBERT)\n- [Speech to text translation](https://huggingface.co/facebook/hubert-large-ls960-ft)\n- [Toxic comment detection](https://huggingface.co/unitary/toxic-bert?)\n\nYou can also find [hundreds of pre-trained, open-source Transformer models](https://huggingface.co/models?library=transformers&sort=downloads) available on the Hugging Face Hub.\n\n\n## 7. How to get started using BERT\n\nWe've [created this notebook](https://colab.research.google.com/drive/1YtTqwkwaqV2n56NC8xerflt95Cjyd4NE?usp=sharing) so you can try BERT through this easy tutorial in Google Colab. Open the notebook or add the following code to your own. Pro Tip: Use (Shift + Click) to run a code cell.\n\nNote: Hugging Face's [pipeline class](https://huggingface.co/docs/transformers/main_classes/pipelines) makes it incredibly easy to pull in open source ML models like transformers with just a single line of code.\n\n### 7.1 Install Transformers\n\nFirst, let's install Transformers via the following code:\n\n```python\n!pip install transformers\n```\n\n### 7.2 Try out BERT\n\nFeel free to swap out the sentence below for one of your own. 
However, leave [MASK] in somewhere to allow BERT to predict the missing word\n\n```python\nfrom transformers import pipeline\nunmasker = pipeline('fill-mask', model='bert-base-uncased')\nunmasker(\"Artificial Intelligence [MASK] take over the world.\")\n```\n\nWhen you run the above code you should see an output like this:\n\n```\n[{'score': 0.3182411789894104,\n 'sequence': 'artificial intelligence can take over the world.',\n 'token': 2064,\n 'token_str': 'can'},\n {'score': 0.18299679458141327,\n 'sequence': 'artificial intelligence will take over the world.',\n 'token': 2097,\n 'token_str': 'will'},\n {'score': 0.05600147321820259,\n 'sequence': 'artificial intelligence to take over the world.',\n 'token': 2000,\n 'token_str': 'to'},\n {'score': 0.04519503191113472,\n 'sequence': 'artificial intelligences take over the world.',\n 'token': 2015,\n 'token_str': '##s'},\n {'score': 0.045153118669986725,\n 'sequence': 'artificial intelligence would take over the world.',\n 'token': 2052,\n 'token_str': 'would'}]\n```\n\nKind of frightening right? 🙃\n\n### 7.3 Be aware of model bias\n\nLet's see what jobs BERT suggests for a \"man\":\n\n```python\nunmasker(\"The man worked as a [MASK].\")\n```\n\nWhen you run the above code you should see an output that looks something like:\n\n```python\n[{'score': 0.09747546911239624,\n 'sequence': 'the man worked as a carpenter.',\n 'token': 10533,\n 'token_str': 'carpenter'},\n {'score': 0.052383411675691605,\n 'sequence': 'the man worked as a waiter.',\n 'token': 15610,\n 'token_str': 'waiter'},\n {'score': 0.04962698742747307,\n 'sequence': 'the man worked as a barber.',\n 'token': 13362,\n 'token_str': 'barber'},\n {'score': 0.037886083126068115,\n 'sequence': 'the man worked as a mechanic.',\n 'token': 15893,\n 'token_str': 'mechanic'},\n {'score': 0.037680838257074356,\n 'sequence': 'the man worked as a salesman.',\n 'token': 18968,\n 'token_str': 'salesman'}]\n```\n\nBERT predicted the man's job to be a Carpenter, Waiter, Barber, Mechanic, or Salesman\n\n Now let's see what jobs BERT suggesst for \"woman\"\n\n```python\nunmasker(\"The woman worked as a [MASK].\")\n```\n\nYou should see an output that looks something like:\n```python\n[{'score': 0.21981535851955414,\n 'sequence': 'the woman worked as a nurse.',\n 'token': 6821,\n 'token_str': 'nurse'},\n {'score': 0.1597413569688797,\n 'sequence': 'the woman worked as a waitress.',\n 'token': 13877,\n 'token_str': 'waitress'},\n {'score': 0.11547300964593887,\n 'sequence': 'the woman worked as a maid.',\n 'token': 10850,\n 'token_str': 'maid'},\n {'score': 0.03796879202127457,\n 'sequence': 'the woman worked as a prostitute.',\n 'token': 19215,\n 'token_str': 'prostitute'},\n {'score': 0.030423851683735847,\n 'sequence': 'the woman worked as a cook.',\n 'token': 5660,\n 'token_str': 'cook'}]\n```\n\nBERT predicted the woman's job to be a Nurse, Waitress, Maid, Prostitute, or Cook displaying a clear gender bias in professional roles.\n\n### 7.4 Some other BERT Notebooks you might enjoy:\n\n[A Visual Notebook to BERT for the First Time](https://colab.research.google.com/github/jalammar/jalammar.github.io/blob/master/notebooks/bert/A_Visual_Notebook_to_Using_BERT_for_the_First_Time.ipynb)\n\n[Train your tokenizer](https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/tokenizer_training.ipynb)\n\n+Don't forget to checkout the [Hugging Face Transformers Course](https://huggingface.co/course/chapter1/1) to learn more 🎉\n\n\n## 8. BERT FAQs\n\n
**Can BERT be used with PyTorch?**

Yes! Our experts at Hugging Face have open-sourced the PyTorch [transformers repository](https://github.com/huggingface/transformers) on GitHub.

Pro Tip: Lewis Tunstall, Leandro von Werra, and Thomas Wolf also wrote a book to help people build language applications with Hugging Face called 'Natural Language Processing with Transformers'.

**Can BERT be used with TensorFlow?**

Yes. The original BERT release was implemented in TensorFlow, and 🤗 Transformers provides TensorFlow classes for BERT (such as `TFBertModel`), so you can stay in the TensorFlow ecosystem if you prefer.

**How long does it take to pre-train BERT?**

The 2 original BERT models were trained on 4 (BERTbase) and 16 (BERTlarge) Cloud TPUs for 4 days.

**How long does it take to fine-tune BERT?**

For the common NLP tasks discussed above, BERT takes between 1-25 minutes on a single Cloud TPU or between 1-130 minutes on a single GPU.

**What makes BERT different?**

BERT was one of the first models in NLP to be trained in a two-step way:

1. BERT was trained on massive amounts of unlabeled data (no human annotation) in an unsupervised fashion.
2. BERT was then trained on small amounts of human-annotated data, starting from the previous pre-trained model, resulting in state-of-the-art performance.
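As a quick illustration of the PyTorch and TensorFlow answers above, here is a minimal loading sketch (it assumes both `torch` and `tensorflow` are installed; in practice you only need the framework you actually use):

```python
from transformers import BertModel, TFBertModel

# PyTorch version of BERTbase (uses the torch backend).
pt_model = BertModel.from_pretrained("bert-base-uncased")

# TensorFlow version of the same checkpoint (uses the TensorFlow backend).
tf_model = TFBertModel.from_pretrained("bert-base-uncased")

print(type(pt_model).__name__, type(tf_model).__name__)
```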
## 9. Conclusion

BERT is a highly complex and advanced language model that helps people automate language understanding. Its state-of-the-art performance is supported by training on massive amounts of data and by leveraging the Transformer architecture to revolutionize the field of NLP.

Thanks to BERT's open-source library, and the incredible AI community's efforts to continue to improve and share new BERT models, the future of untouched NLP milestones looks bright.

What will you create with BERT?

Learn how to [fine-tune BERT](https://huggingface.co/docs/transformers/training) for your particular use case 🤗

# Gradio Demo: hello_blocks

```
!pip install -q gradio
```

```
import gradio as gr

def greet(name):
    return "Hello " + name + "!"

with gr.Blocks() as demo:
    name = gr.Textbox(label="Name")
    output = gr.Textbox(label="Output Box")
    greet_btn = gr.Button("Greet")
    greet_btn.click(fn=greet, inputs=name, outputs=output, api_name="greet")

if __name__ == "__main__":
    demo.launch()
```

# Metric Card for FrugalScore

## Metric Description
FrugalScore is a reference-based metric for Natural Language Generation (NLG) model evaluation. It is based on a distillation approach that makes it possible to learn a fixed, low-cost version of any expensive NLG metric, while retaining most of its original performance.

The FrugalScore models are obtained by continuing the pretraining of small models on a synthetic dataset constructed using summarization, backtranslation and denoising models. During the training, the small models learn the internal mapping of the expensive metric, including any similarity function.

## How to use

When loading FrugalScore, you can indicate the model you wish to use to compute the score. The default model is `moussaKam/frugalscore_tiny_bert-base_bert-score`, and a full list of models can be found in the [Limitations and bias](#Limitations-and-bias) section.

```python
>>> from datasets import load_metric
>>> frugalscore = load_metric("frugalscore", "moussaKam/frugalscore_medium_bert-base_mover-score")
```

FrugalScore calculates how good the predictions are, given some references, based on a set of scores.

The inputs it takes are:

`predictions`: a list of strings representing the predictions to score.

`references`: a list of strings representing the references for each prediction.

Its optional arguments are:

`batch_size`: the batch size for predictions (default value is `32`).

`max_length`: the maximum sequence length (default value is `128`).

`device`: either "gpu" or "cpu" (default value is `None`).
\n\n```python\n>>> results = frugalscore.compute(predictions=['hello there', 'huggingface'], references=['hello world', 'hugging face'], batch_size=16, max_length=64, device=\"gpu\")\n```\n\n## Output values\n\nThe output of FrugalScore is a dictionary with the list of scores for each prediction-reference pair:\n```python\n{'scores': [0.6307541, 0.6449357]}\n```\n\n### Values from popular papers\nThe [original FrugalScore paper](https://arxiv.org/abs/2110.08559) reported that FrugalScore-Tiny retains 97.7/94.7% of the original performance compared to [BertScore](https://huggingface.co/metrics/bertscore) while running 54 times faster and having 84 times less parameters.\n\n## Examples \n\nMaximal values (exact match between `references` and `predictions`): \n\n```python\n>>> from datasets import load_metric\n>>> frugalscore = load_metric(\"frugalscore\")\n>>> results = frugalscore.compute(predictions=['hello world'], references=['hello world'])\n>>> print(results)\n{'scores': [0.9891098]}\n```\n\nPartial values: \n\n```python\n>>> from datasets import load_metric\n>>> frugalscore = load_metric(\"frugalscore\")\n>>> results = frugalscore.compute(predictions=['hello world'], references=['hugging face'])\n>>> print(results)\n{'scores': [0.42482382]}\n```\n\n## Limitations and bias\n\nFrugalScore is based on [BertScore](https://huggingface.co/metrics/bertscore) and [MoverScore](https://arxiv.org/abs/1909.02622), and the models used are based on the original models used for these scores.\n\nThe full list of available models for FrugalScore is:\n\n| FrugalScore | Student | Teacher | Method |\n|----------------------------------------------------|-------------|----------------|------------|\n| [moussaKam/frugalscore_tiny_bert-base_bert-score](https://huggingface.co/moussaKam/frugalscore_tiny_bert-base_bert-score) | BERT-tiny | BERT-Base | BERTScore |\n| [moussaKam/frugalscore_small_bert-base_bert-score](https://huggingface.co/moussaKam/frugalscore_small_bert-base_bert-score) | BERT-small | BERT-Base | BERTScore |\n| [moussaKam/frugalscore_medium_bert-base_bert-score](https://huggingface.co/moussaKam/frugalscore_medium_bert-base_bert-score) | BERT-medium | BERT-Base | BERTScore |\n| [moussaKam/frugalscore_tiny_roberta_bert-score](https://huggingface.co/moussaKam/frugalscore_tiny_roberta_bert-score) | BERT-tiny | RoBERTa-Large | BERTScore |\n| [moussaKam/frugalscore_small_roberta_bert-score](https://huggingface.co/moussaKam/frugalscore_small_roberta_bert-score) | BERT-small | RoBERTa-Large | BERTScore |\n| [moussaKam/frugalscore_medium_roberta_bert-score](https://huggingface.co/moussaKam/frugalscore_medium_roberta_bert-score) | BERT-medium | RoBERTa-Large | BERTScore |\n| [moussaKam/frugalscore_tiny_deberta_bert-score](https://huggingface.co/moussaKam/frugalscore_tiny_deberta_bert-score) | BERT-tiny | DeBERTa-XLarge | BERTScore |\n| [moussaKam/frugalscore_small_deberta_bert-score](https://huggingface.co/moussaKam/frugalscore_small_deberta_bert-score) | BERT-small | DeBERTa-XLarge | BERTScore |\n| [moussaKam/frugalscore_medium_deberta_bert-score](https://huggingface.co/moussaKam/frugalscore_medium_deberta_bert-score) | BERT-medium | DeBERTa-XLarge | BERTScore |\n| [moussaKam/frugalscore_tiny_bert-base_mover-score](https://huggingface.co/moussaKam/frugalscore_tiny_bert-base_mover-score) | BERT-tiny | BERT-Base | MoverScore |\n| [moussaKam/frugalscore_small_bert-base_mover-score](https://huggingface.co/moussaKam/frugalscore_small_bert-base_mover-score) | BERT-small | BERT-Base | MoverScore |\n| 
[moussaKam/frugalscore_medium_bert-base_mover-score](https://huggingface.co/moussaKam/frugalscore_medium_bert-base_mover-score) | BERT-medium | BERT-Base | MoverScore |

Depending on the size of the model picked, the loading time will vary: the `tiny` models will load very quickly, whereas the `medium` ones can take several minutes, depending on your Internet connection.

## Citation
```bibtex
@article{eddine2021frugalscore,
  title={FrugalScore: Learning Cheaper, Lighter and Faster Evaluation Metrics for Automatic Text Generation},
  author={Eddine, Moussa Kamal and Shang, Guokan and Tixier, Antoine J-P and Vazirgiannis, Michalis},
  journal={arXiv preprint arXiv:2110.08559},
  year={2021}
}
```

## Further References
- [Original FrugalScore code](https://github.com/moussaKam/FrugalScore)
- [FrugalScore paper](https://arxiv.org/abs/2110.08559)

# Advantage Actor Critic (A2C) using Robotics Simulations with Panda-Gym 🤖 [[hands-on]]

Now that you've studied the theory behind Advantage Actor Critic (A2C), **you're ready to train your A2C agent** using Stable-Baselines3 in a robotic environment. You will train:
- A robotic arm 🦾 to move to the correct position.

We're going to use:
- [panda-gym](https://github.com/qgallouedec/panda-gym)

To validate this hands-on for the certification process, you need to push your trained model to the Hub and get the following result:

- `PandaReachDense-v3`: get a result of >= -3.5.

To find your result, [go to the leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard) and find your model: **the result = mean_reward - std of reward**.

For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process

**To start the hands-on, click on the Open In Colab button** 👇:

[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/deep-rl-class/blob/master/notebooks/unit6/unit6.ipynb)

# Unit 6: Advantage Actor Critic (A2C) using Robotics Simulations with Panda-Gym 🤖

### 🎮 Environments:

- [Panda-Gym](https://github.com/qgallouedec/panda-gym)

### 📚 RL-Library:

- [Stable-Baselines3](https://stable-baselines3.readthedocs.io/)

We're constantly trying to improve our tutorials, so **if you find some issues in this notebook**, please [open an issue on the GitHub Repo](https://github.com/huggingface/deep-rl-class/issues).

## Objectives of this notebook 🏆

At the end of the notebook, you will:

- Be able to use **Panda-Gym**, the environment library.
- Be able to **train robots using A2C**.
- Understand why **we need to normalize the input**.
- Be able to **push your trained agent and the code to the Hub** with a nice video replay and an evaluation score 🔥.

## Prerequisites 🏗️

Before diving into the notebook, you need to:

🔲 📚 Study [Actor-Critic methods by reading Unit 6](https://huggingface.co/deep-rl-course/unit6/introduction) 🤗

# Let's train our first robots 🤖

## Set the GPU 💪

- To **accelerate the agent's training, we'll use a GPU**.
To do that, go to `Runtime > Change Runtime type`\n\n\"GPU\n\n- `Hardware Accelerator > GPU`\n\n\"GPU\n\n## Create a virtual display 🔽\n\nDuring the notebook, we'll need to generate a replay video. To do so, with colab, **we need to have a virtual screen to be able to render the environment** (and thus record the frames).\n\nThe following cell will install the librairies and create and run a virtual screen 🖥\n\n```python\n%%capture\n!apt install python-opengl\n!apt install ffmpeg\n!apt install xvfb\n!pip3 install pyvirtualdisplay\n```\n\n```python\n# Virtual display\nfrom pyvirtualdisplay import Display\n\nvirtual_display = Display(visible=0, size=(1400, 900))\nvirtual_display.start()\n```\n\n### Install dependencies 🔽\n\nWe’ll install multiple ones:\n\n- `gymnasium`\n- `panda-gym`: Contains the robotics arm environments.\n- `stable-baselines3`: The SB3 deep reinforcement learning library.\n- `huggingface_sb3`: Additional code for Stable-baselines3 to load and upload models from the Hugging Face 🤗 Hub.\n- `huggingface_hub`: Library allowing anyone to work with the Hub repositories.\n\n```bash\n!pip install stable-baselines3[extra]\n!pip install gymnasium\n!pip install huggingface_sb3\n!pip install huggingface_hub\n!pip install panda_gym\n```\n\n## Import the packages 📦\n\n```python\nimport os\n\nimport gymnasium as gym\nimport panda_gym\n\nfrom huggingface_sb3 import load_from_hub, package_to_hub\n\nfrom stable_baselines3 import A2C\nfrom stable_baselines3.common.evaluation import evaluate_policy\nfrom stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize\nfrom stable_baselines3.common.env_util import make_vec_env\n\nfrom huggingface_hub import notebook_login\n```\n\n## PandaReachDense-v3 🦾\n\nThe agent we're going to train is a robotic arm that needs to do controls (moving the arm and using the end-effector).\n\nIn robotics, the *end-effector* is the device at the end of a robotic arm designed to interact with the environment.\n\nIn `PandaReach`, the robot must place its end-effector at a target position (green ball).\n\nWe're going to use the dense version of this environment. It means we'll get a *dense reward function* that **will provide a reward at each timestep** (the closer the agent is to completing the task, the higher the reward). Contrary to a *sparse reward function* where the environment **return a reward if and only if the task is completed**.\n\nAlso, we're going to use the *End-effector displacement control*, it means the **action corresponds to the displacement of the end-effector**. 
We don't control the individual motion of each joint (joint control).\n\n\"Robotics\"/\n\nThis way **the training will be easier**.\n\n### Create the environment\n\n#### The environment 🎮\n\nIn `PandaReachDense-v3` the robotic arm must place its end-effector at a target position (green ball).\n\n```python\nenv_id = \"PandaReachDense-v3\"\n\n# Create the env\nenv = gym.make(env_id)\n\n# Get the state space and action space\ns_size = env.observation_space.shape\na_size = env.action_space\n```\n\n```python\nprint(\"_____OBSERVATION SPACE_____ \\n\")\nprint(\"The State Space is: \", s_size)\nprint(\"Sample observation\", env.observation_space.sample()) # Get a random observation\n```\n\nThe observation space **is a dictionary with 3 different elements**:\n\n- `achieved_goal`: (x,y,z) position of the goal.\n- `desired_goal`: (x,y,z) distance between the goal position and the current object position.\n- `observation`: position (x,y,z) and velocity of the end-effector (vx, vy, vz).\n\nGiven it's a dictionary as observation, **we will need to use a MultiInputPolicy policy instead of MlpPolicy**.\n\n```python\nprint(\"\\n _____ACTION SPACE_____ \\n\")\nprint(\"The Action Space is: \", a_size)\nprint(\"Action Space Sample\", env.action_space.sample()) # Take a random action\n```\n\nThe action space is a vector with 3 values:\n- Control x, y, z movement\n\n\n### Normalize observation and rewards\n\nA good practice in reinforcement learning is to [normalize input features](https://stable-baselines3.readthedocs.io/en/master/guide/rl_tips.html).\n\nFor that purpose, there is a wrapper that will compute a running average and standard deviation of input features.\n\nWe also normalize rewards with this same wrapper by adding `norm_reward = True`\n\n[You should check the documentation to fill this cell](https://stable-baselines3.readthedocs.io/en/master/guide/vec_envs.html#vecnormalize)\n\n```python\nenv = make_vec_env(env_id, n_envs=4)\n\n# Adding this wrapper to normalize the observation and the reward\nenv = # TODO: Add the wrapper\n```\n\n#### Solution\n\n```python\nenv = make_vec_env(env_id, n_envs=4)\n\nenv = VecNormalize(env, norm_obs=True, norm_reward=True, clip_obs=10.)\n```\n\n### Create the A2C Model 🤖\n\nFor more information about A2C implementation with StableBaselines3 check: https://stable-baselines3.readthedocs.io/en/master/modules/a2c.html#notes\n\nTo find the best parameters I checked the [official trained agents by Stable-Baselines3 team](https://huggingface.co/sb3).\n\n```python\nmodel = # Create the A2C model and try to find the best parameters\n```\n\n#### Solution\n\n```python\nmodel = A2C(policy = \"MultiInputPolicy\",\n env = env,\n verbose=1)\n```\n\n### Train the A2C agent 🏃\n\n- Let's train our agent for 1,000,000 timesteps, don't forget to use GPU on Colab. 
It will take approximately ~25-40min\n\n```python\nmodel.learn(1_000_000)\n```\n\n```python\n# Save the model and VecNormalize statistics when saving the agent\nmodel.save(\"a2c-PandaReachDense-v3\")\nenv.save(\"vec_normalize.pkl\")\n```\n\n### Evaluate the agent 📈\n\n- Now that's our agent is trained, we need to **check its performance**.\n- Stable-Baselines3 provides a method to do that: `evaluate_policy`\n\n```python\nfrom stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize\n\n# Load the saved statistics\neval_env = DummyVecEnv([lambda: gym.make(\"PandaReachDense-v3\")])\neval_env = VecNormalize.load(\"vec_normalize.pkl\", eval_env)\n\n# We need to override the render_mode\neval_env.render_mode = \"rgb_array\"\n\n# do not update them at test time\neval_env.training = False\n# reward normalization is not needed at test time\neval_env.norm_reward = False\n\n# Load the agent\nmodel = A2C.load(\"a2c-PandaReachDense-v3\")\n\nmean_reward, std_reward = evaluate_policy(model, eval_env)\n\nprint(f\"Mean reward = {mean_reward:.2f} +/- {std_reward:.2f}\")\n```\n### Publish your trained model on the Hub 🔥\n\nNow that we saw we got good results after the training, we can publish our trained model on the Hub with one line of code.\n\n📚 The libraries documentation 👉 https://github.com/huggingface/huggingface_sb3/tree/main#hugging-face--x-stable-baselines3-v20\n\nBy using `package_to_hub`, as we already mentionned in the former units, **you evaluate, record a replay, generate a model card of your agent and push it to the hub**.\n\nThis way:\n- You can **showcase our work** 🔥\n- You can **visualize your agent playing** 👀\n- You can **share with the community an agent that others can use** 💾\n- You can **access a leaderboard 🏆 to see how well your agent is performing compared to your classmates** 👉 https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard\n\nTo be able to share your model with the community there are three more steps to follow:\n\n1️⃣ (If it's not already done) create an account to HF ➡ https://huggingface.co/join\n\n2️⃣ Sign in and then, you need to store your authentication token from the Hugging Face website.\n- Create a new token (https://huggingface.co/settings/tokens) **with write role**\n\n\"Create\n\n- Copy the token\n- Run the cell below and paste the token\n\n```python\nnotebook_login()\n!git config --global credential.helper store\n```\nIf you don't want to use a Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login`\n\n3️⃣ We're now ready to push our trained agent to the 🤗 Hub 🔥 using `package_to_hub()` function.\nFor this environment, **running this cell can take approximately 10min**\n\n```python\nfrom huggingface_sb3 import package_to_hub\n\npackage_to_hub(\n model=model,\n model_name=f\"a2c-{env_id}\",\n model_architecture=\"A2C\",\n env_id=env_id,\n eval_env=eval_env,\n repo_id=f\"ThomasSimonini/a2c-{env_id}\", # Change the username\n commit_message=\"Initial commit\",\n)\n```\n\n## Some additional challenges 🏆\n\nThe best way to learn **is to try things by your own**! Why not trying `PandaPickAndPlace-v3`?\n\nIf you want to try more advanced tasks for panda-gym, you need to check what was done using **TQC or SAC** (a more sample-efficient algorithm suited for robotics tasks). 
In real robotics, you'll use a more sample-efficient algorithm for a simple reason: contrary to a simulation **if you move your robotic arm too much, you have a risk of breaking it**.\n\nPandaPickAndPlace-v1 (this model uses the v1 version of the environment): https://huggingface.co/sb3/tqc-PandaPickAndPlace-v1\n\nAnd don't hesitate to check panda-gym documentation here: https://panda-gym.readthedocs.io/en/latest/usage/train_with_sb3.html\n\nWe provide you the steps to train another agent (optional):\n\n1. Define the environment called \"PandaPickAndPlace-v3\"\n2. Make a vectorized environment\n3. Add a wrapper to normalize the observations and rewards. [Check the documentation](https://stable-baselines3.readthedocs.io/en/master/guide/vec_envs.html#vecnormalize)\n4. Create the A2C Model (don't forget verbose=1 to print the training logs).\n5. Train it for 1M Timesteps\n6. Save the model and VecNormalize statistics when saving the agent\n7. Evaluate your agent\n8. Publish your trained model on the Hub 🔥 with `package_to_hub`\n\n\n### Solution (optional)\n\n```python\n# 1 - 2\nenv_id = \"PandaPickAndPlace-v3\"\nenv = make_vec_env(env_id, n_envs=4)\n\n# 3\nenv = VecNormalize(env, norm_obs=True, norm_reward=True, clip_obs=10.)\n\n# 4\nmodel = A2C(policy = \"MultiInputPolicy\",\n env = env,\n verbose=1)\n# 5\nmodel.learn(1_000_000)\n```\n\n```python\n# 6\nmodel_name = \"a2c-PandaPickAndPlace-v3\";\nmodel.save(model_name)\nenv.save(\"vec_normalize.pkl\")\n\n# 7\nfrom stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize\n\n# Load the saved statistics\neval_env = DummyVecEnv([lambda: gym.make(\"PandaPickAndPlace-v3\")])\neval_env = VecNormalize.load(\"vec_normalize.pkl\", eval_env)\n\n# do not update them at test time\neval_env.training = False\n# reward normalization is not needed at test time\neval_env.norm_reward = False\n\n# Load the agent\nmodel = A2C.load(model_name)\n\nmean_reward, std_reward = evaluate_policy(model, eval_env)\n\nprint(f\"Mean reward = {mean_reward:.2f} +/- {std_reward:.2f}\")\n\n# 8\npackage_to_hub(\n model=model,\n model_name=f\"a2c-{env_id}\",\n model_architecture=\"A2C\",\n env_id=env_id,\n eval_env=eval_env,\n repo_id=f\"ThomasSimonini/a2c-{env_id}\", # TODO: Change the username\n commit_message=\"Initial commit\",\n)\n```\n\nSee you on Unit 7! 🔥\n\n## Keep learning, stay awesome 🤗\n"},"source":{"kind":"string","value":"huggingface/deep-rl-class/blob/main/units/en/unit6/hands-on.mdx"}}},{"rowIdx":924,"cells":{"text":{"kind":"string","value":"--\ntitle: \"We Raised $100 Million for Open & Collaborative Machine Learning 🚀\"\nthumbnail: /blog/assets/65_series_c/thumbnail.jpg\nauthors:\n- user: huggingface\n---\n\n# We Raised $100 Million for Open & Collaborative Machine Learning 🚀\n\n\nToday we have some exciting news to share! Hugging Face has raised $100 Million in Series C funding 🔥🔥🔥 led by Lux Capital with major participations from Sequoia, Coatue and support of existing investors Addition, a_capital, SV Angel, Betaworks, AIX Ventures, Kevin Durant, Rich Kleiman from Thirty Five Ventures, Olivier Pomel (co-founder & CEO at Datadog) and more.\n\n
\n \"Series\n
\n\nWe've come a long way since we first open sourced [PyTorch BERT](https://twitter.com/Thom_Wolf/status/1068637731281088513) in 2018 and are just getting started! 🙌\n\nMachine learning is becoming the default way to build technology. When you think about your average day, machine learning is everywhere: from your Zoom background, to searching on Google, to ordering an Uber or writing an email with auto-complete --it's all machine learning.\n\nHugging Face is now the fastest growing community & most used platform for machine learning! With 100,000 pre-trained models & 10,000 datasets hosted on the platform for NLP, computer vision, speech, time-series, biology, reinforcement learning, chemistry and more, the [Hugging Face Hub](https://huggingface.co/models) has become the Home of Machine Learning to create, collaborate, and deploy state-of-the-art models.\n\n
\n \"The\n
\n\nOver 10,000 companies are now using Hugging Face to build technology with machine learning. Their Machine Learning scientists, Data scientists and Machine Learning engineers have saved countless hours while accelerating their machine learning roadmaps with the help of our [products](https://huggingface.co/platform) and [services](https://huggingface.co/support). \n\nWe want to have a positive impact on the AI field. We think the direction of more responsible AI is through openly sharing models, datasets, training procedures, evaluation metrics and working together to solve issues. We believe open source and open science bring trust, robustness, reproducibility, and continuous innovation. With this in mind, we are leading [BigScience](https://bigscience.huggingface.co/), a collaborative workshop around the study and creation of very large language models gathering more than 1,000 researchers of all backgrounds and disciplines. We are now training the [world's largest open source multilingual language model](https://twitter.com/BigScienceLLM) 🌸\n\n⚠️ But there’s still a huge amount of work left to do.\n\nAt Hugging Face, we know that Machine Learning has some important limitations and challenges that need to be tackled now like biases, privacy, and energy consumption. With openness, transparency & collaboration, we can foster responsible & inclusive progress, understanding & accountability to mitigate these challenges.\n\nThanks to the new funding, we’ll be doubling down on research, open-source, products and responsible democratization of AI.\n\n
\n \"The\n
It's been a hell of a ride to grow from 30 to 120+ team members in the past 12 months. We were super lucky to have been joined by incredibly talented (and fun!) teammates like [Dr. Margaret Mitchell](https://www.bloomberg.com/news/articles/2021-08-24/fired-at-google-after-critical-work-ai-researcher-mitchell-to-join-hugging-face) and the [Gradio team](https://gradio.app/joining-huggingface/), and we don't plan to stop here. We're [hiring for every position](https://apply.workable.com/huggingface) you can think of, for every level of seniority. We are a remote-friendly, decentralized organization with transparency and value-inspired decision making by default.

Huge thanks to every contributor in our amazing community and team, our customers, partners, and investors for helping us reach this point. We couldn't have done it without you, and we can't wait to work together with you on what's next. Your contributions are key to helping build a better future where AI is founded on open source, open science, ethics and collaboration.

---

*For press inquiries, please contact team@huggingface.co*

# How to configure SAML SSO with Azure

In this guide, we will use Azure as the SSO provider, with the Security Assertion Markup Language (SAML) protocol as our preferred identity protocol.

We currently support SP-initiated and IdP-initiated authentication. User provisioning is not supported at this time.

This feature is part of the Enterprise Hub.

### Step 1: Create a new application in your Identity Provider

Open a new tab/window in your browser and sign in to the Azure portal of your organization.

Navigate to "Enterprise applications" and click the "New application" button.
\n\n
\n\nYou'll be redirected to this page, click on \"Create your own application\", fill the name of your application, and then \"Create\" the application.\n\n
\n\n
\n\nThen select \"Single Sign-On\", and select SAML\n\n
\n\n
\n\n\n### Step 2: Configure your application on Azure\n\nOpen a new tab/window in your browser and navigate to the SSO section of your organization's settings. Select the SAML protocol.\n\n
\n\n\n
\n\n
\n\n\n
\n\nCopy the \"SP Entity Id\" from the organization's settings on Hugging Face, and paste it in the \"Identifier (Entity Id)\" field on Azure (1).\n\nCopy the \"Assertion Consumer Service URL\" from the organization's settings on Hugging Face, and paste it in the \"Reply URL\" field on Azure (2).\n\n\nThe URL looks like this: `https://huggingface.co/organizations/[organizationIdentifier]/saml/consume`.\n\n
\n\n
\n\nThen under \"SAML Certificates\", verify that \"Signin Option\" is set to \"Sign SAML response and assertion\".\n\n
\n\n
\n\n\nSave your new application.\n\n### Step 3: Finalize configuration on Hugging Face\n\nIn your Azure application, under \"Set up\", find the following field:\n- Login Url\n\nAnd under \"SAML Certificates\":\n- Download the \"Certificate (base64)\"\n\nYou will need them to finalize the SSO setup on Hugging Face.\n\n\n
\n\n
\n\nIn the SSO section of your organization's settings, copy-paste these values from Azure:\n\n- Login Url -> Sign-on URL\n- Certificate -> Public certificate\n\nThe public certificate must have the following format:\n\n```\n-----BEGIN CERTIFICATE-----\n{certificate}\n-----END CERTIFICATE-----\n```\n\n
\n\n\n
\n\nYou can now click on \"Update and Test SAML configuration\" to save the settings.\n\nYou should be redirected to your SSO provider (IdP) login prompt. Once logged in, you'll be redirected to your organization's settings page.\n\nA green check mark near the SAML selector will attest that the test was successful.\n\n\n
\n\t\n\t\n
### Step 4: Enable SSO in your organization

Now that Single Sign-On is configured and tested, you can enable it for members of your organization by clicking on the "Enable" button.

Once enabled, members of your organization must complete the SSO authentication flow described in [How does it work?](./security-sso#how-does-it-work).

# Gradio Spaces

**Gradio** provides an easy and intuitive interface for running a model from a list of inputs and displaying the outputs in formats such as images, audio, 3D objects, and more. Gradio now even has a [Plot output component](https://gradio.app/docs/#o_plot) for creating data visualizations with Matplotlib, Bokeh, and Plotly! For more details, take a look at the [Getting started](https://gradio.app/getting_started/) guide from the Gradio team.

Selecting **Gradio** as the SDK when [creating a new Space](https://huggingface.co/new-space) will initialize your Space with the latest version of Gradio by setting the `sdk` property to `gradio` in your `README.md` file's YAML block. If you'd like to change the Gradio version, you can edit the `sdk_version` property.

Visit the [Gradio documentation](https://gradio.app/docs/) to learn all about its features and check out the [Gradio Guides](https://gradio.app/guides/) for some handy tutorials to help you get started!

## Your First Gradio Space: Hot Dog Classifier

In the following sections, you'll learn the basics of creating a Space, configuring it, and deploying your code to it. We'll create a **Hot Dog Classifier** Space with Gradio that'll be used to demo the [julien-c/hotdog-not-hotdog](https://huggingface.co/julien-c/hotdog-not-hotdog) model, which can detect whether a given picture contains a hot dog 🌭

You can find a completed version of this hosted at [NimaBoscarino/hotdog-gradio](https://huggingface.co/spaces/NimaBoscarino/hotdog-gradio).

## Create a new Gradio Space

We'll start by [creating a brand new Space](https://huggingface.co/new-space) and choosing **Gradio** as our SDK. Hugging Face Spaces are Git repositories, meaning that you can work on your Space incrementally (and collaboratively) by pushing commits. Take a look at the [Getting Started with Repositories](./repositories-getting-started) guide to learn about how you can create and edit files before continuing.

## Add the dependencies

For the **Hot Dog Classifier** we'll be using a [🤗 Transformers pipeline](https://huggingface.co/docs/transformers/pipeline_tutorial) to use the model, so we need to start by installing a few dependencies.
This can be done by creating a **requirements.txt** file in our repository, and adding the following dependencies to it:\n\n```\ntransformers\ntorch\n```\n\nThe Spaces runtime will handle installing the dependencies!\n\n## Create the Gradio interface\n\nTo create the Gradio app, make a new file in the repository called **app.py**, and add the following code:\n\n```python\nimport gradio as gr\nfrom transformers import pipeline\n\npipeline = pipeline(task=\"image-classification\", model=\"julien-c/hotdog-not-hotdog\")\n\ndef predict(input_img):\n predictions = pipeline(input_img)\n return input_img, {p[\"label\"]: p[\"score\"] for p in predictions} \n\ngradio_app = gr.Interface(\n predict,\n inputs=gr.Image(label=\"Select hot dog candidate\", sources=['upload', 'webcam'], type=\"pil\"),\n outputs=[gr.Image(label=\"Processed Image\"), gr.Label(label=\"Result\", num_top_classes=2)],\n title=\"Hot Dog? Or Not?\",\n)\n\nif __name__ == \"__main__\":\n gradio_app.launch()\n```\n\nThis Python script uses a [🤗 Transformers pipeline](https://huggingface.co/docs/transformers/pipeline_tutorial) to load the [julien-c/hotdog-not-hotdog](https://huggingface.co/julien-c/hotdog-not-hotdog) model, which is used by the Gradio interface. The Gradio app will expect you to upload an image, which it'll then classify as *hot dog* or *not hot dog*. Once you've saved the code to the **app.py** file, visit the **App** tab to see your app in action!\n\n
\n\n\n
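If you want to sanity-check the model outside of the Space UI, the same pipeline can also be run locally; here is a small sketch (the image filename is just a placeholder for any picture you have on disk):

```python
from PIL import Image
from transformers import pipeline

# Same model as the Space, run locally for a quick check.
classifier = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")

image = Image.open("my_photo.jpg")  # placeholder path
predictions = classifier(image)
print(predictions)  # e.g. [{'label': 'hot dog', 'score': ...}, {'label': 'not hot dog', 'score': ...}]
```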
\n\n## Embed Gradio Spaces on other webpages\n\nYou can embed a Gradio Space on other webpages by using either Web Components or the HTML `